{"projects": [{"components": [{"description": "", "name": "VM Pooler"}], "description": "", "externalName": "VM Pooler (Archived)", "issues": [{"affectedVersions": [], "assignee": null, "attachments": [], "comments": [{"author": "6220db96c4d0fe0069535219", "body": "Migrating issue to a GitHub project at https://github.com/orgs/puppetlabs/projects/64/views/1?pane=issue&itemId=34967227", "created": "2023-08-03T06:56:00.000000"}], "components": [], "created": "2023-07-10T08:13:00.000000", "creator": "6220db96c4d0fe0069535219", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@524942dc"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Method Found", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|o0m4xd:"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": 
"1_*:*_1_*:*_2069019715_*|*_6_*:*_1_*:*_0"}], "description": "The main process should probably exit if all connection attempts to redis have failed.\n\nAs of today it does not exit, which means that kubernetes sees the Pod is in a running state and ArgoCD shows the app in a healthy state, so the pods have to be manually restarted. If the process were to exit, then kubernetes should see the Pod has entered a Failed state and automatically restart it.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "63829", "fixedVersions": [], "id": "63829", "issueType": "Bug", "key": "POOLER-229", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Minor", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "6220db96c4d0fe0069535219", "resolution": "Incomplete", "resolutionDate": "2023-08-03T06:56:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "03/Aug/23 6:56 AM", "summary": "Exit main process if all redis connection attempts fail", "timeSpent": "PT0S", "updated": "2023-08-03T06:56:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [{"author": "6220db96c4d0fe0069535219", "body": "Migrating issue to a GitHub project at https://github.com/orgs/puppetlabs/projects/64/views/1?pane=issue&itemId=34967262", "created": "2023-08-03T06:56:00.000000"}], "components": [], "created": "2023-05-01T07:57:00.000000", "creator": "6220db96c4d0fe0069535219", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": 
"com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@6f14f5bd"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Method Found", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|o0m4kx:"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_8117915579_*|*_6_*:*_1_*:*_0"}], "description": "With DNS plugin introduction in vmpooler 3.x, a bug was introduced in the manager where it fails to start if no dns gems are included in dns_configs. 
Manager should be allowed to run only with a dynamic-dns config.\n\n_Issue created in Slack using_  *_/jira create_* _._", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "63763", "fixedVersions": ["vmpooler-3.2.0"], "id": "63763", "issueType": "Bug", "key": "POOLER-228", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Minor", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "6220db96c4d0fe0069535219", "resolution": "Incomplete", "resolutionDate": "2023-08-03T06:56:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "03/Aug/23 6:56 AM", "summary": "Fix startup error when not using any dns plugins", "timeSpent": "PT0S", "updated": "2023-08-03T06:56:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [], "components": [], "created": "2023-05-01T06:10:00.000000", "creator": "6220db96c4d0fe0069535219", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@160cf622"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": 
"com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|o0m4kp:"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_8122177633_*|*_6_*:*_1_*:*_0"}], "description": "With issue management in Jira Cloud, I think we should add fixed issues to the changelog generation while still keeping github changelog generation for merged PRs, if possible. When an issue is fixed we could add a tag named <COMPONENT_NAME-NEXT_RELEASE> and maybe use the [jira-ruby|https://rubygems.org/gems/jira-ruby] gem to query for those issues.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "63762", "fixedVersions": ["vmpooler-3.2.0"], "id": "63762", "issueType": "Task", "key": "POOLER-227", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "6220db96c4d0fe0069535219", "resolution": "Won't Do", "resolutionDate": "2023-08-03T06:20:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "03/Aug/23 6:20 AM", "summary": "Add Jira Issues to Changelog Generation", "timeSpent": "PT0S", "updated": "2023-08-03T06:20:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [{"author": "6220db96c4d0fe0069535219", "body": "Issue exists at https://github.com/puppetlabs/vmfloaty/issues/52", "created": "2023-08-03T06:24:00.000000"}], "components": [], "created": "2023-04-20T09:12:00.000000", "creator": 
"6220db96c4d0fe0069535219", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@1d7adfe4"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|o0m4hl:"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_9061973009_*|*_6_*:*_1_*:*_0"}], "description": "Right now the token command prints out the token, but doesn't have a way to write to your .vmfloaty.yml file.  Any reason for this other than time / can I add that?\n\n\n\n{quote}The only issue I think is when a config file already exists with existing config options. 
Maybe we could add a warning that the values will be overridden and add a {{y/N}} prompt to prevent people accidentally overriding their previous config options.{quote}", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "63746", "fixedVersions": [], "id": "63746", "issueType": "New Feature", "key": "POOLER-226", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "6220db96c4d0fe0069535219", "resolution": "Incomplete", "resolutionDate": "2023-08-03T06:24:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "03/Aug/23 6:24 AM", "summary": "Adding --save to token command", "timeSpent": "PT0S", "updated": "2023-08-03T06:24:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [{"author": "6220db96c4d0fe0069535219", "body": "Issues exists at https://github.com/puppetlabs/vmfloaty/issues/101", "created": "2023-08-03T06:25:00.000000"}], "components": [], "created": "2023-04-20T09:10:00.000000", "creator": "6220db96c4d0fe0069535219", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@3aff335e"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", 
"value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Method Found", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|o0m4hd:"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_9062075762_*|*_6_*:*_1_*:*_0"}], "description": "h2. Describe the Bug\n\nNeither bash nor zsh completions include the new service command.\n\nh2. Expected Behavior\n\nI expect all commands to be represented\n\nh2. Environment\n\n* Version 1.0.0\n* Platform macOS 10.15.6\n\n\n\nit has two sub commands that do not currently take parameters:\n\n{quote}floaty service types\nfloaty service examples{quote}", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "63745", "fixedVersions": [], "id": "63745", "issueType": "Bug", "key": "POOLER-225", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "6220db96c4d0fe0069535219", "resolution": "Incomplete", "resolutionDate": "2023-08-03T06:25:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "03/Aug/23 6:25 AM", "summary": "shell completion does not include new service command", "timeSpent": "PT0S", "updated": "2023-08-03T06:25:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": 
[], "assignee": null, "attachments": [], "comments": [{"author": "6220db96c4d0fe0069535219", "body": "Issue exists at https://github.com/puppetlabs/vmfloaty/issues/129", "created": "2023-08-03T06:25:00.000000"}], "components": [], "created": "2023-04-20T09:08:00.000000", "creator": "6220db96c4d0fe0069535219", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@34f5f92d"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Method Found", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|o0m4h5:"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_9062209097_*|*_6_*:*_1_*:*_0"}], "description": "Running {{floaty get token --trace}} with floaty 1.1.1, I get\n\n{noformat}No token found. 
Retrieving a token...\nEnter your <https://abs-prod.k8s.infracore.puppet.net/api/v1> service password:\n**************\nTraceback (most recent call last):\n\t15: from /Users/michaelsmith/.rbenv/versions/2.6.5/bin/floaty:23:in `<main>'\n\t14: from /Users/michaelsmith/.rbenv/versions/2.6.5/bin/floaty:23:in `load'\n\t13: from /Users/michaelsmith/.rbenv/versions/2.6.5/lib/ruby/gems/2.6.0/gems/vmfloaty-1.1.1/bin/floaty:8:in `<top (required)>'\n\t12: from /Users/michaelsmith/.rbenv/versions/2.6.5/lib/ruby/gems/2.6.0/gems/vmfloaty-1.1.1/lib/vmfloaty.rb:569:in `run'\n\t11: from /Users/michaelsmith/.gem/ruby/2.6.0/gems/commander-4.4.7/lib/commander/delegates.rb:15:in `run!'\n\t10: from /Users/michaelsmith/.gem/ruby/2.6.0/gems/commander-4.4.7/lib/commander/runner.rb:68:in `run!'\n\t 9: from /Users/michaelsmith/.gem/ruby/2.6.0/gems/commander-4.4.7/lib/commander/runner.rb:446:in `run_active_command'\n\t 8: from /Users/michaelsmith/.gem/ruby/2.6.0/gems/commander-4.4.7/lib/commander/command.rb:153:in `run'\n\t 7: from /Users/michaelsmith/.gem/ruby/2.6.0/gems/commander-4.4.7/lib/commander/command.rb:182:in `call'\n\t 6: from /Users/michaelsmith/.rbenv/versions/2.6.5/lib/ruby/gems/2.6.0/gems/vmfloaty-1.1.1/lib/vmfloaty.rb:68:in `block (2 levels) in run'\n\t 5: from /Users/michaelsmith/.rbenv/versions/2.6.5/lib/ruby/gems/2.6.0/gems/vmfloaty-1.1.1/lib/vmfloaty/service.rb:82:in `retrieve'\n\t 4: from /Users/michaelsmith/.rbenv/versions/2.6.5/lib/ruby/gems/2.6.0/gems/vmfloaty-1.1.1/lib/vmfloaty/service.rb:50:in `token'\n\t 3: from /Users/michaelsmith/.rbenv/versions/2.6.5/lib/ruby/gems/2.6.0/gems/vmfloaty-1.1.1/lib/vmfloaty/service.rb:58:in `get_new_token'\n\t 2: from /Users/michaelsmith/.rbenv/versions/2.6.5/lib/ruby/gems/2.6.0/gems/vmfloaty-1.1.1/lib/vmfloaty/auth.rb:14:in `get_token'\n\t 1: from /Users/michaelsmith/.gem/ruby/2.6.0/gems/json-2.3.0/lib/json/common.rb:156:in `parse'\n/Users/michaelsmith/.gem/ruby/2.6.0/gems/json-2.3.0/lib/json/common.rb:156:in `parse': 783: unexpected 
token at 'POST /api/v1/token' (JSON::ParserError){noformat}\n\n\n\n{quote}I'd say it's probably still worth fixing up the stack trace so it doesn't fail to parse json! Feel free to reopen...not sure who is maintaining this these days but it would be an easy contribution :){quote}", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "63744", "fixedVersions": [], "id": "63744", "issueType": "Bug", "key": "POOLER-224", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "6220db96c4d0fe0069535219", "resolution": "Incomplete", "resolutionDate": "2023-08-03T06:25:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "03/Aug/23 6:25 AM", "summary": "Stack trace getting token", "timeSpent": "PT0S", "updated": "2023-08-03T06:25:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [{"author": "6220db96c4d0fe0069535219", "body": "Issue exists at https://github.com/puppetlabs/vmfloaty/issues/161", "created": "2023-08-03T06:25:00.000000"}], "components": [], "created": "2023-04-20T09:07:00.000000", "creator": "6220db96c4d0fe0069535219", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@7fbb95a8"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": 
"People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|o0m4gx:"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_9062326436_*|*_6_*:*_1_*:*_0"}], "description": "Since Faraday 2.0 has been released, we should upgrade according to [https://github.com/lostisland/faraday/blob/main/UPGRADING.md|https://github.com/lostisland/faraday/blob/main/UPGRADING.md|smart-link] ", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "63743", "fixedVersions": [], "id": "63743", "issueType": "Task", "key": "POOLER-223", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "6220db96c4d0fe0069535219", "resolution": "Incomplete", "resolutionDate": "2023-08-03T06:25:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "03/Aug/23 6:25 AM", "summary": "Upgrade to Faraday 2.0", "timeSpent": "PT0S", "updated": "2023-08-03T06:25:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [{"author": "6220db96c4d0fe0069535219", "body": "Issue exists at https://github.com/puppetlabs/vmpooler/issues/184", "created": "2023-08-03T06:26:00.000000"}], 
"components": [], "created": "2023-04-20T09:03:00.000000", "creator": "6220db96c4d0fe0069535219", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@2fd727b2"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|o0m4gp:"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_9062568586_*|*_6_*:*_1_*:*_0"}], "description": "The codebase uses a lot of threading but doesn't seem to handle locks etc. e.g. 
The ruby array class is not necessarily thread safe.\n\nShould probably move to a more modern implementation for threads, possibly using;\n[https://github.com/ruby-concurrency/concurrent-ruby|https://github.com/ruby-concurrency/concurrent-ruby|smart-link] ", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "63742", "fixedVersions": [], "id": "63742", "issueType": "Improvement", "key": "POOLER-222", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "6220db96c4d0fe0069535219", "resolution": "Incomplete", "resolutionDate": "2023-08-03T06:26:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "03/Aug/23 6:26 AM", "summary": "Should move to a modern thread handling library", "timeSpent": "PT0S", "updated": "2023-08-03T06:26:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [{"author": "6220db96c4d0fe0069535219", "body": "Issue exists at https://github.com/puppetlabs/vmpooler/issues/186", "created": "2023-08-03T06:27:00.000000"}], "components": [], "created": "2023-04-20T09:02:00.000000", "creator": "6220db96c4d0fe0069535219", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@6cf194d5"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": 
"People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|o0m4gh:"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_9062671973_*|*_6_*:*_1_*:*_0"}], "description": "The terms pool, vm, host and template change meaning in the codebase.\n\ne.g.\n[https://github.com/puppetlabs/vmpooler/blob/master/lib/vmpooler/pool_manager.rb#L334-L338|https://github.com/puppetlabs/vmpooler/blob/master/lib/vmpooler/pool_manager.rb#L334-L338|smart-link] \nNote that we get the template name, but then use it as a pool name\n\n{{clone_vm}} is the worst offender\nin [https://github.com/puppetlabs/vmpooler/blob/master/lib/vmpooler/pool_manager.rb#L199|https://github.com/puppetlabs/vmpooler/blob/master/lib/vmpooler/pool_manager.rb#L199|smart-link]  we find the VM name to clone from (the template) but then in [https://github.com/puppetlabs/vmpooler/blob/master/lib/vmpooler/pool_manager.rb#L213|https://github.com/puppetlabs/vmpooler/blob/master/lib/vmpooler/pool_manager.rb#L213|smart-link]  we use template as the pool name.  
This introduces an error where the VM Template we clone from MUST be named the same as the pool name.\n\nThe spec helpers interchangeably use template and pool\n\nIn [https://github.com/puppetlabs/vmpooler/blob/master/lib/vmpooler/api/v1.rb#L27-L29|https://github.com/puppetlabs/vmpooler/blob/master/lib/vmpooler/api/v1.rb#L27-L29] the function called {{pool_exists?}} takes a single parameter of {{template}}\n\nIn {{pool_manager.rb}} there are a lot calls to {{host = vsphere.find_vm(vm)}} however the concept of a host is different in [https://github.com/puppetlabs/vmpooler/blob/master/lib/vmpooler/pool_manager.rb#L494|https://github.com/puppetlabs/vmpooler/blob/master/lib/vmpooler/pool_manager.rb#L494|smart-link]  where a host refers to the computer HOSTING the VM\n\n... and so on.\n\nThe naming and usage should be modified to be consistent to make it easier to both work on the codebase, and easier to test.  I would propose the following definitions:\n\nvm - The short name of a VM (i.e. not the FQDN, but the hostname only).  This is normally used as a key in redis etc.\nvm_object - An object, of some sort (normally a vSphere object) that represents the VM\n\npool - The name of a pool of VMs.  This is normally used as a key in redis, in the config file (pools)\npool_config - The configuration of a pool from configuration file (usually a hash table)\n\nhost - The short name of the computer on which a VM resides\nhost_object - An object, of some sort (normally a vSphere object) that represents the host on which a VM resides\n\ntemplate - The short name of the VM which is cloned to produce VMs for a pool\n\nObviously this is not an exhaustive list and there are derivatives of these names e.g. in the VM migration code there will be a current_host and move_to_host concept.\n\n\n\n{quote}As far as implementation. 
I would probably start with renaming the variables in the spec tests and then look at renaming the code itself.{quote}", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "63741", "fixedVersions": [], "id": "63741", "issueType": "Improvement", "key": "POOLER-221", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "6220db96c4d0fe0069535219", "resolution": "Incomplete", "resolutionDate": "2023-08-03T06:27:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "03/Aug/23 6:27 AM", "summary": "Use consistent meanings for vm, pool, host and template", "timeSpent": "PT0S", "updated": "2023-08-03T06:27:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [{"author": "6220db96c4d0fe0069535219", "body": "Issue exists at https://github.com/puppetlabs/vmpooler/issues/201", "created": "2023-08-03T06:27:00.000000"}], "components": [], "created": "2023-04-20T09:01:00.000000", "creator": "6220db96c4d0fe0069535219", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@77d624f7"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": 
"Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|o0m4g9:"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_9062774365_*|*_6_*:*_1_*:*_0"}], "description": "The code in [https://github.com/puppetlabs/vmpooler/blob/master/lib/vmpooler/vsphere_helper.rb#L216-L221|https://github.com/puppetlabs/vmpooler/blob/master/lib/vmpooler/vsphere_helper.rb#L216-L221] seems very fragile by only using the 5th element in a CPU description and doesn't seem to take into account VMWare EVC ([https://kb.vmware.com/selfservice/microsites/search.do?language=en_US&cmd=displayKC&externalId=1003212)|https://kb.vmware.com/selfservice/microsites/search.do?language=en_US&cmd=displayKC&externalId=1003212)] which reduces the vCPU features presented to the guests.\n\nThis should perhaps use a better method of arch detection\n\n\n\n{quote}It's true. It probably makes more sense to detect whether EVC is configured. 
When it is not, then perhaps falling back to determining compatibility based on ESXi compatibility version.{quote}\n\n\n\n{quote}The biggest arch problem tends to be AMD vs Intel as opposed to, say, v3 or V4 of an Intel processor.\n\nIt would be nice if vSphere offered an API to say \"Can I vMotion from Host1 to Host2?\" but alas ...{quote}", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "63740", "fixedVersions": [], "id": "63740", "issueType": "Improvement", "key": "POOLER-220", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "6220db96c4d0fe0069535219", "resolution": "Incomplete", "resolutionDate": "2023-08-03T06:27:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "03/Aug/23 6:27 AM", "summary": "CPU Arch detection appears fragile and doesn't take into account Vmware EVC", "timeSpent": "PT0S", "updated": "2023-08-03T06:27:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [{"author": "6220db96c4d0fe0069535219", "body": "Issue exists at https://github.com/puppetlabs/vmpooler/issues/206", "created": "2023-08-03T06:27:00.000000"}], "components": [], "created": "2023-04-20T08:58:00.000000", "creator": "6220db96c4d0fe0069535219", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@5463f970"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": 
"Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Method Found", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|o0m4g1:"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_9062934177_*|*_6_*:*_1_*:*_0"}], "description": "The {{find_least_used_compatible_host}} in the {{vsphere_helper}} throws an {{comparison of Array with Array failed}} error if two or more hosts shared the same utilisation.\n\nThis appears to be because Array.sort does not like/support this scenario in\n[https://github.com/puppetlabs/vmpooler/blob/master/lib/vmpooler/vsphere_helper.rb#L269|https://github.com/puppetlabs/vmpooler/blob/master/lib/vmpooler/vsphere_helper.rb#L269]\n\nInstead we may need to implement our own comparison operator e.g. 
(this is an example only)\n\n{code:ruby}target_host = target_hosts.sort do |x,y|\n  x <=> y || 1\nend{code}\n\nAlso affects {{find_least_used_host}}\n\nThis is no longer in vsphere_helper, but is still a bug.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "63739", "fixedVersions": [], "id": "63739", "issueType": "Bug", "key": "POOLER-219", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "6220db96c4d0fe0069535219", "resolution": "Incomplete", "resolutionDate": "2023-08-03T06:27:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "03/Aug/23 6:27 AM", "summary": "find_least_used_compatible_host in vsphere_helper does not cope with hosts with the same utilisation", "timeSpent": "PT0S", "updated": "2023-08-03T06:27:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [{"author": "6220db96c4d0fe0069535219", "body": "Issue exists at https://github.com/puppetlabs/vmpooler/issues/282", "created": "2023-08-03T06:55:00.000000"}], "components": [], "created": "2023-04-20T08:57:00.000000", "creator": "6220db96c4d0fe0069535219", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@429faef9"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": 
"People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Method Found", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|o0m4ft:"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_9064721291_*|*_6_*:*_1_*:*_0"}], "description": "From [#218|https://github.com/puppetlabs/vmpooler/issues/218]\n\nFound a small bug making the API choke on hostnames containing a dash:\n\nlib/vmpooler/api/helpers.rb:120 reads:\n\nif domain && hostname =~ /^\\w+\\.#{domain}$/\nbut should be:\n\nif domain && hostname =~ /^[a-zA-Z0-9-]+\\.#{domain}$/", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "63738", "fixedVersions": [], "id": "63738", "issueType": "Bug", "key": "POOLER-218", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "6220db96c4d0fe0069535219", "resolution": "Incomplete", "resolutionDate": "2023-08-03T06:55:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "03/Aug/23 6:55 AM", "summary": "API hostname regex should support a dash in hostname", "timeSpent": "PT0S", "updated": "2023-08-03T06:55:00.000000", 
"votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [{"author": "6220db96c4d0fe0069535219", "body": "Issue exists at https://github.com/puppetlabs/vmpooler/issues/206", "created": "2023-08-03T06:28:00.000000"}], "components": [], "created": "2023-04-20T08:53:00.000000", "creator": "6220db96c4d0fe0069535219", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@22a8cd43"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|o0m4fl:"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_9063255389_*|*_6_*:*_1_*:*_0"}], "description": "Should fix this before it gets out of hand.  Why the triple stacked classes?\n\n{code:ruby}module Vmpooler\n  class PoolManager\n    class Provider\n      class VSphere\n\n      end\n    end\n  end\nend{code}\n\nI don't understand why someone coded this way.  
We can fix by using module namespaces instead.  However since there is also a pool manager class we also just need to remove that bit too.\n\nExample:\n\n{code:ruby}module Vmpooler\n    module Provider\n      class VSphere\n\n      end\n    end\nend\n{code}", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "63737", "fixedVersions": [], "id": "63737", "issueType": "Improvement", "key": "POOLER-217", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "6220db96c4d0fe0069535219", "resolution": "Incomplete", "resolutionDate": "2023-08-03T06:28:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "03/Aug/23 6:28 AM", "summary": "bad naming convention for providers", "timeSpent": "PT0S", "updated": "2023-08-03T06:28:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "61af7ccdc75da800725247ff", "attachments": [], "comments": [{"author": "6220db96c4d0fe0069535219", "body": "Migrating issue to a GitHub project at https://github.com/orgs/puppetlabs/projects/64/views/1?pane=issue&itemId=34967075", "created": "2023-08-03T06:52:00.000000"}], "components": [], "created": "2023-04-20T08:02:00.000000", "creator": "6220db96c4d0fe0069535219", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@2ffc79f7"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": 
"com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|o0m4fd:"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_9067836172_*|*_6_*:*_1_*:*_0"}], "description": "I noticed at least a couple places in the logs of newer redis versions with deprecations:\n\n{noformat}Redis#sadd will always return an Integer in Redis 5.0.0. Use Redis#sadd? instead.(called from: /usr/local/bundle/gems/vmpooler-2.4.0/lib/vmpooler/pool_manager.rb:166:in `block in move_pending_vm_to_ready'){noformat}\n\n{noformat}Redis#sadd will always return an Integer in Redis 5.0.0. Use Redis#sadd? 
instead.(called from: /usr/local/bundle/gems/vmpooler-2.4.0/lib/vmpooler/pool_manager.rb:425:in `block in _clone_vm'){noformat}", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "63736", "fixedVersions": [], "id": "63736", "issueType": "Task", "key": "POOLER-216", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "6220db96c4d0fe0069535219", "resolution": "Incomplete", "resolutionDate": "2023-08-03T06:52:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "03/Aug/23 6:52 AM", "summary": "Redis 5.x Deprecations", "timeSpent": "PT0S", "updated": "2023-08-03T06:52:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [{"author": "6220db96c4d0fe0069535219", "body": "Migrating issue to a GitHub project at https://github.com/orgs/puppetlabs/projects/64/views/1?pane=issue&itemId=34967057", "created": "2023-08-03T06:52:00.000000"}], "components": [], "created": "2023-03-23T12:09:00.000000", "creator": "6220db96c4d0fe0069535219", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@740df714"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", 
"value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|o0m4a9:"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_11472144343_*|*_6_*:*_1_*:*_0"}], "description": "We should implement otel tracing in pool manager, so that we can debug and track performance changes in the class.\n\n_Issue created in Slack using_  *_/jira create_* _._", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "63705", "fixedVersions": [], "id": "63705", "issueType": "Improvement", "key": "POOLER-215", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Minor", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "6220db96c4d0fe0069535219", "resolution": "Incomplete", "resolutionDate": "2023-08-03T06:52:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "03/Aug/23 6:52 AM", "summary": "Implement tracing in pool manager", "timeSpent": "PT0S", "updated": "2023-08-03T06:52:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [{"author": "6220db96c4d0fe0069535219", "body": "Duplicate of https://puppet.atlassian.net/browse/POOLER-169", "created": "2023-08-03T06:32:00.000000"}], "components": [], "created": "2023-03-22T13:01:00.000000", "creator": "6220db96c4d0fe0069535219", "customFieldValues": 
[{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@21576627"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Method Found", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|o0m4a1:"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_11554245167_*|*_6_*:*_1_*:*_0"}], "description": "Anonymous users should not be able to request a VM, this operation should require a token.\n\n_Issue created in Slack using_  *_/jira create_* _._", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "63704", "fixedVersions": [], "id": "63704", "issueType": "Bug", "key": "POOLER-214", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Minor", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM 
Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "6220db96c4d0fe0069535219", "resolution": "Duplicate", "resolutionDate": "2023-08-03T06:32:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "03/Aug/23 6:32 AM", "summary": "Enforce Authentication for Requesting a VM", "timeSpent": "PT0S", "updated": "2023-08-03T06:32:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [{"author": "6220db96c4d0fe0069535219", "body": "Already decoupled and implemented cloud dnd plugin", "created": "2023-08-03T06:31:00.000000"}], "components": [], "created": "2022-09-19T07:58:00.000000", "creator": "6220db96c4d0fe0069535219", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@46e217da"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|o0jry9:"}, {"fieldName": "[CHART] Time in Status", "fieldType": 
"com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_27469982728_*|*_5_*:*_1_*:*_0"}], "description": "With the concept of multiple providers we should separate out DNS record provisioning into their own gems to configure the desired DNS provider (A gem for each DNS platform similar to how we have one for each compute platform). For example, I want to use both the EC2 and GCE providers for VMs, but want to provision DNS records only in Google or only in AWS (or some other supported DNS platform).", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10073", "fixedVersions": [], "id": "10073", "issueType": "Task", "key": "POOLER-202", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "6220db96c4d0fe0069535219", "resolution": "Done", "resolutionDate": "2023-08-03T06:31:00.000000", "status": "Resolved", "statusCategory": "Done", "statuscategorychangedate": "03/Aug/23 6:31 AM", "summary": "Separate DNS Record Provisioning", "timeSpent": "PT0S", "updated": "2023-08-03T06:31:00.000000", "votes": "1", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [{"author": "557058:79f2fdd6-baee-43c4-91af-c7e647173c95", "body": "[~accountid:6220db96c4d0fe0069535219] ", "created": "2021-09-30T14:17:00.000000"}, {"author": "557058:79f2fdd6-baee-43c4-91af-c7e647173c95", "body": "[~accountid:6220db96c4d0fe0069535219]", "created": "2021-11-01T11:58:00.000000"}, {"author": "6220db96c4d0fe0069535219", "body": "Migrating issue to a GitHub project at https://github.com/orgs/puppetlabs/projects/64/views/1?pane=issue&itemId=34967001", "created": "2023-08-03T06:44:00.000000"}], "components": [], "created": "2021-08-25T08:55:00.000000", "creator": 
"6220db96c4d0fe0069535219", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@70e9cb8a"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|o0at9v:"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "30/Sep/21"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_61163327682_*|*_6_*:*_1_*:*_0"}], "description": "As a result of moving some metrics generation from pool_manager to the api, we noticed that acceptance testing for prometheus metric outputs in the API should be implemented.\n\nThis would spin up a real test instance, generate metrics, then validate the correct label values at the prometheus endpoint.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10022", "fixedVersions": [], "id": "10022", "issueType": "Improvement", "key": "POOLER-199", "labels": [], 
"originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "6220db96c4d0fe0069535219", "resolution": "Incomplete", "resolutionDate": "2023-08-03T06:44:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "03/Aug/23 6:44 AM", "summary": "Add Acceptance Testing for Prometheus Metrics", "timeSpent": "PT0S", "updated": "2023-08-03T06:44:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "6220db96c4d0fe0069535219", "attachments": [], "comments": [{"author": "6220db96c4d0fe0069535219", "body": "Fixed in [https://github.com/puppetlabs/vmpooler/pull/457] and released in version 1.1.1", "created": "2021-08-24T09:32:00.000000"}], "components": [], "created": "2021-08-24T09:20:00.000000", "creator": "6220db96c4d0fe0069535219", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@5dc5d3dd"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Method 
Found", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|o0asbf:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "1.0"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_711474_*|*_6_*:*_1_*:*_0"}], "description": "It appears otel tracing in VMPooler may have been broken since the last time the gems were updated. It looks like an updated version of the gems may resolve this.\n\n\u00a0\n\nErrors observed in logs:\n{code:java}\nwarning: thread \"Ruby-0-Thread-1: /usr/local/bundle/gems/opentelemetry-sdk-0.15.0/lib/opentelemetry/sdk/trace/export/batch_span_processor.rb:179\" terminated with exception (report_on_exception is true):\nNoMethodError: undefined method `export' for #<Hash:0x61159131>\n export_batch at /usr/local/bundle/gems/opentelemetry-sdk-0.15.0/lib/opentelemetry/sdk/trace/export/batch_span_processor.rb:186\n synchronize at org/jruby/ext/thread/Mutex.java:164\n export_batch at /usr/local/bundle/gems/opentelemetry-sdk-0.15.0/lib/opentelemetry/sdk/trace/export/batch_span_processor.rb:186\n work at /usr/local/bundle/gems/opentelemetry-sdk-0.15.0/lib/opentelemetry/sdk/trace/export/batch_span_processor.rb:169\n loop at org/jruby/RubyKernel.java:1442\n work at /usr/local/bundle/gems/opentelemetry-sdk-0.15.0/lib/opentelemetry/sdk/trace/export/batch_span_processor.rb:158\n reset_on_fork at /usr/local/bundle/gems/opentelemetry-sdk-0.15.0/lib/opentelemetry/sdk/trace/export/batch_span_processor.rb:179\n2021-08-21 13:01:45 - NoMethodError - undefined method `each' for \"DEUEUQ\":String:\n 
/usr/local/bundle/gems/vmpooler-1.1.0/lib/vmpooler/api/v1.rb:145:in `block in fetch_single_vm'{code}\nAnd addresses breaking changes due to\u00a0[https://github.com/open-telemetry/opentelemetry-ruby/pull/600] (should have released in 0.14.0?)\n\nFull changelog from 0.15.0 to 0.17.0: [https://github.com/open-telemetry/opentelemetry-ruby/compare/opentelemetry-sdk/v0.15.0...opentelemetry-sdk/v0.17.0]", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10021", "fixedVersions": [], "id": "10021", "issueType": "Bug", "key": "POOLER-198", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "6220db96c4d0fe0069535219", "resolution": "Fixed", "resolutionDate": "2021-08-24T09:32:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Fix otel Tracing", "timeSpent": "PT0S", "updated": "2023-05-01T06:11:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "6220db96c4d0fe0069535219", "attachments": [], "comments": [{"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "The last commit on vsphere-automation-sdk-ruby was July 2020. 
It may be out of date.", "created": "2021-10-06T11:10:00.000000"}, {"author": "6220db96c4d0fe0069535219", "body": "Evidently the vsphere-automation-sdk-ruby has been deprecated as well with no replacement per [https://kb.vmware.com/s/article/87632]\n\n\u00a0\n\nLooks like we might just have to use the REST API directly: [https://developer.vmware.com/apis/vsphere-automation/latest/]\n\n\u00a0", "created": "2023-01-09T13:14:00.000000"}, {"author": "6220db96c4d0fe0069535219", "body": "Migrated to a community fork, rbvmomi2 as part of [https://github.com/puppetlabs/vmpooler-provider-vsphere/pull/25]", "created": "2023-01-31T12:36:00.000000"}], "components": [], "created": "2021-08-23T08:30:00.000000", "creator": "6220db96c4d0fe0069535219", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@693e7840"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hyk9sv:040900001009070cfy"}, {"fieldName": "[CHART] Date of First Response", 
"fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "06/Oct/21"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_45464762532_*|*_5_*:*_1_*:*_0"}], "description": "The currently used library, [rbvmomi|https://github.com/vmware-archive/rbvmomi], for interacting with the vSphere API is no longer maintained as of July 1, 2021.\n\nThe VMWare vSphere SDK website ([https://www.vmware.com/support/developer/vapi/index.html]) lists [https://github.com/vmware/vsphere-automation-sdk-ruby] as the current Ruby SDK project.\n\nSince the rbvmomi project only supports up to vSphere 7.0, the vSphere provider will need to be updated to leverage the new SDK to support newer vSphere versions.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10115", "fixedVersions": [], "id": "10115", "issueType": "Task", "key": "POOLER-197", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "6220db96c4d0fe0069535219", "resolution": "Done", "resolutionDate": "2023-01-31T12:36:00.000000", "status": "Resolved", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Rewrite vSphere Provider with Replacement SDK", "timeSpent": "PT0S", "updated": "2023-01-31T12:36:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [{"author": "6220db96c4d0fe0069535219", "body": "Migrating issue to a GitHub project at https://github.com/orgs/puppetlabs/projects/64/views/1?pane=issue&itemId=34966995", "created": "2023-08-03T06:42:00.000000"}], "components": [], "created": "2021-05-19T07:42:00.000000", "creator": "557058:9479147e-472e-492f-9d5f-2788ee2dd8d0", 
"customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@4beb54ba"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Method Found", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|o08l23:"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "03/Aug/23"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_69634818592_*|*_6_*:*_1_*:*_0"}], "description": "[~accountid:557058:252602f6-56c9-47a2-a176-2c9c57106330] reported an issue with litmus using ABS / vmpooler. Litmus creates a reasonably unique job_id called eg iac-task-pid-496 which include the pid of the running command. 
Sometimes vmpooler (I believe ondemand) will return a 409 \n\n\"XYZ has already been created 409 Conflict\"\n\nI believe that list of 'existing' job_id is never cleaned up. Can we verify this? It should be cleaned up at least when a job is successful or after a cerrtain amount of time has passed.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10071", "fixedVersions": [], "id": "10071", "issueType": "Bug", "key": "POOLER-196", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Medium (migrated)", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:9479147e-472e-492f-9d5f-2788ee2dd8d0", "resolution": "Incomplete", "resolutionDate": "2023-08-03T06:42:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "03/Aug/23 6:42 AM", "summary": "vmpooler should cleanup its used job_id list", "timeSpent": "PT0S", "updated": "2023-08-03T06:42:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [{"author": "557058:9479147e-472e-492f-9d5f-2788ee2dd8d0", "body": "Thats the ticket I was thinking about when I asked Travis to do the NSPooler one which ended up not being what I thought it was! :P Regardless this one here is a good first ticket too, since we can probably look at ABS and NSpooler that implemented that kind of API.", "created": "2021-03-03T14:53:00.000000"}, {"author": "557058:9479147e-472e-492f-9d5f-2788ee2dd8d0", "body": "Ok so as background you may want to look into 'sinatra' applications and how we use it in ruby for our REST API coding. 
eg https://x-team.com/blog/how-to-create-a-ruby-api-with-sinatra/\n\nthere was a similar funstionality added to ABS in the routes directory https://github.com/puppetlabs/always-be-scheduling/tree/master/lib/always_be_scheduling/routes\n\nSee ticket DIO-954 and PR https://github.com/puppetlabs/always-be-scheduling/pull/301\n\nWe want to have the same functionality in vmpooler because its also moving to kubernetes. VMPooler also uses sinatra, and also has a concept of checking that the API request is authenticated.\n\ncode for the API endpoints is in https://github.com/puppetlabs/vmpooler/tree/master/lib/vmpooler/api\n\n\n", "created": "2021-03-09T10:03:00.000000"}, {"author": "6220dba149c90000701fd0a3", "body": "Restart endpoint comment is not being logged properly once executed\n\nThe manager does not restart", "created": "2021-05-05T14:11:00.000000"}, {"author": "557058:9479147e-472e-492f-9d5f-2788ee2dd8d0", "body": "Pair programming we fixed a couple things in this commit https://github.com/puppetlabs/vmpooler/commit/93fc0129e1aea296be1d9d6c0042af20121c046b\n\nNeed documentation in readme or API.md and changelog updated.", "created": "2021-05-11T14:27:00.000000"}, {"author": "6220db96c4d0fe0069535219", "body": "Migrating issue to a GitHub project at https://github.com/orgs/puppetlabs/projects/64/views/1?pane=issue&itemId=34966986", "created": "2023-08-03T06:48:00.000000"}], "components": [], "created": "2021-03-03T11:34:00.000000", "creator": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@684b080c"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", 
"fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hyk3yz:i1"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "1.0"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "03/Mar/21"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_2_*:*_30219272028_*|*_3_*:*_1_*:*_46051124622_*|*_6_*:*_1_*:*_0"}], "description": "We should offer an API to restart vmpooler. 
It should require authentication with a token and permit an authorized user to trigger an application restart.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10123", "fixedVersions": [], "id": "10123", "issueType": "New Feature", "key": "POOLER-194", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "resolution": "Incomplete", "resolutionDate": "2023-08-03T06:48:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "03/Aug/23 6:48 AM", "summary": "API to restart application", "timeSpent": "PT0S", "updated": "2023-08-03T06:48:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "attachments": [], "comments": [{"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "I added redis multi calls as a part of my fix and that has caused some problems. 
I will make an additional PR that removes multi calls until a connection pool can be added.", "created": "2020-11-10T12:28:00.000000"}], "components": [], "created": "2020-10-28T11:28:00.000000", "creator": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@2fdb2b01"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Method Found", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|o04re0:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "3.0"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_5054_*|*_10009_*:*_1_*:*_421946383_*|*_6_*:*_1_*:*_0_*|*_10005_*:*_1_*:*_791490816"}], "description": "When checking out a VM vmpooler will move it to a running queue, but does not 
immediately mark the SUT as active. POOLER-191 resolved an issue where machines in this state could be left forever. However, because of the API behavior a machine that is a part of a atomic checkout involving many machines may be inspected for being in the active queue before API has set this data, since they happen separately. We should probably eliminate the active queue and take action based on data kept with the VM instead.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10074", "fixedVersions": [], "id": "10074", "issueType": "Bug", "key": "POOLER-193", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Major", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "resolution": "Done", "resolutionDate": "2020-11-11T11:32:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "API does not immediately designate checked out instances as running", "timeSpent": "PT0S", "updated": "2020-11-11T11:32:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "5ea206ba1a2cf60c0fab3dbc", "attachments": [], "comments": [], "components": [], "created": "2020-10-22T18:16:00.000000", "creator": "5ea206ba1a2cf60c0fab3dbc", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@734e0aa3"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": 
"com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|o04l7w:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "1.0"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_58714556_*|*_3_*:*_1_*:*_30388820_*|*_10009_*:*_1_*:*_244856292_*|*_6_*:*_1_*:*_0"}], "description": "Rubocop has it's first stable release. 
We should take care of fixing any incompatibilities with the vmpooler codebase and this new version so we can bump the rubocop gem to use it.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10020", "fixedVersions": [], "id": "10020", "issueType": "Improvement", "key": "POOLER-192", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "5ea206ba1a2cf60c0fab3dbc", "resolution": "Done", "resolutionDate": "2020-10-26T15:02:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Update VMPooler to run on rubocop stable release v1.0", "timeSpent": "PT0S", "updated": "2020-10-26T15:02:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [{"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "CORRECTION: 'brown-infarct' shows that it is in 'ready' state per the /vm endpoint, but it is in the _running_ queue according to vmpooler. I ran a poolreset for centos-7-x86_64-pixa4 and this did not clear it.\n\nAny long running machines should be destroyed based on their lifetime, however it appears in this case the machine is not being identified as past its running time, which may be a part of why it is running for an extended period of time.", "created": "2020-10-07T17:07:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "It looks like the problem here is when a SUT does not have an entry in vmpooler__active, despite having been moved to the `running` queue. 
The check_running_vm method in pool_manager fails to account for this scenario.", "created": "2020-10-08T14:56:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "I have added a check to vmpooler that should catch these instances and remove them. The change is up for review.", "created": "2020-10-13T15:17:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "This change was merged and deployed to production.", "created": "2020-10-14T16:09:00.000000"}], "components": [], "created": "2020-10-07T17:02:00.000000", "creator": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@421a4855"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Method Found", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|o04fhk:"}, {"fieldName": "Story Points", "fieldType": 
"com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "1.0"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_370063_*|*_3_*:*_1_*:*_433324344_*|*_10009_*:*_1_*:*_89498732_*|*_6_*:*_1_*:*_0_*|*_10005_*:*_1_*:*_78462511"}], "description": "[~accountid:557058:f23b0b7f-9386-48b3-bd71-4399766356f4] noticed that some SUTs are still reporting that they are running on a Tintri storage device, despite our configuration indicating that the SUTs should all be provided by Netapp based on the vmpooler configuration. Vmpooler has a global ready TTL of 480, which should cause any machine that has been in the ready state for 8 hours to be deleted and replaced. However, some machines in the ready queue were deployed on 4/01/2020. If the machine is still in the ready queue it may explain why it is not destroyed. However, it is unclear to me why some machines are not being cleaned up when ready TTL is reached.\n\n\u00a0\n\nAn example machine is 'agile-bowstring', which shows a creation date of 4/1, and reports its state as ready according to the /vm endpoint. This machine is not in the ready queue in vmpooler.\n\n'brown-infarct' is another machine that has the same creation date, and _is_ in the ready queue.\n\nBoth machines show a value for 'lifetime' in vmpooler, which suggests that it was checked out at some point. However, this does not explain why the machine is in the ready queue.\n\nThis ticket is to identify why these machines are sticking around.\n\nVmpooler should expire a ready machine within the ready_ttl. 
There appear to be a few dozen machines in a similar state at a glance.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10092", "fixedVersions": [], "id": "10092", "issueType": "Bug", "key": "POOLER-191", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "resolution": "Done", "resolutionDate": "2020-10-14T16:09:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Some SUTs are not expired when reaching the ready_ttl", "timeSpent": "PT0S", "updated": "2020-10-14T16:09:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [{"author": "557058:79f2fdd6-baee-43c4-91af-c7e647173c95", "body": "[~accountid:6220db96c4d0fe0069535219]", "created": "2021-09-13T12:04:00.000000"}, {"author": "6220db96c4d0fe0069535219", "body": "Migrating issue to a GitHub project at https://github.com/orgs/puppetlabs/projects/64/views/1?pane=issue&itemId=34966970", "created": "2023-08-03T06:43:00.000000"}], "components": [], "created": "2020-08-26T09:22:00.000000", "creator": "557058:295d7a84-a09b-4348-8961-a1e1764c190e", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@17d4241f"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": 
"com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Method Found", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hyk9sv:0409000010092"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "13/Sep/21"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_92611265022_*|*_6_*:*_1_*:*_0"}], "description": "When a pool fails to clone due to bad or no template, pooler keeps attempting to clone.\nAmong other things, this spams the log.\nHave also recently seen hangs on vmpooler when this happens. 
A correlation on this is not certain, but we should really limit the restarts on a failed pool.\n\nFor example, a recent template failure collected over 13,000 log messages in a 12 hour interval\n{noformat}\nfailed while cloning VM with an error: Pool centos-7.2-mono-2018.1.15-preload-x86_64 specifies a template VM of templates/centos-7.2-mono-2018.1.15-preload-x86_64-0.0.1sdfgdfgdgsg which does not exist for the provider vsphere\n{noformat}\n", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10151", "fixedVersions": [], "id": "10151", "issueType": "Bug", "key": "POOLER-190", "labels": ["mob"], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:295d7a84-a09b-4348-8961-a1e1764c190e", "resolution": "Incomplete", "resolutionDate": "2023-08-03T06:43:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "03/Aug/23 6:43 AM", "summary": "(MOB) Failed Template Clones loop forever and spam log", "timeSpent": "PT0S", "updated": "2023-08-03T06:43:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:9479147e-472e-492f-9d5f-2788ee2dd8d0", "attachments": [], "comments": [{"author": "557058:79f2fdd6-baee-43c4-91af-c7e647173c95", "body": "Did some quick sluthing into this. If the config file has the vmpooler service configured first, extending the lifetime just works.\u00a0\nIf the ABS service is defined first, and vmpooler service configured second the extending of lifetime fails.\n\nExample:\n{code}\n> floaty --service=abs get centos-7-x86_64\nRequesting VMs with job_id: 1598981332071.  Will retry for up to an hour.\nWaiting 1 seconds to check if ABS request has been filled.  Queue Position: 97... 
(x1)\nWaiting 2 seconds to check if ABS request has been filled.  Queue Position: 97... (x2)\nWaiting 3 seconds to check if ABS request has been filled.  Queue Position: 97... (x3)\nWaiting 4 seconds to check if ABS request has been filled.  Queue Position: 97... (x4)\nWaiting 5 seconds to check if ABS request has been filled.  Queue Position: 97... (x5)\njerky-cookie.delivery.puppetlabs.net (centos-7-x86_64)\n> floaty modify jerky-cookie.delivery.puppetlabs.net --lifetime 12\nSuccessfully modified VM jerky-cookie.delivery.puppetlabs.net.\nUse `floaty list --active` to see the results.\n> cat ~/.vmfloaty.yml\nservices:\n  vmpooler:\n    url: 'http://vmpooler.delivery.puppetlabs.net'\n    user: 'christopher.thorn'\n    token: '<snip>'\n  abs:\n    url: 'https://cinext-abs.delivery.puppetlabs.net/api/v2'\n    type: 'abs'\n    user: 'chirstopher.thorn'\n    token: '<snip>'\n  ns:\n    url: 'https://nspooler-service-prod-1.delivery.puppetlabs.net/api/v1'\n    token: '<snip>\n    type: 'nonstandard'\n<EDIT FLOATY CONFIG FILE TO PUT ABS FIRST AND VMPOOLER SECOND>\n> cat ~/.vmfloaty.yml\nservices:\n  abs:\n    url: 'https://cinext-abs.delivery.puppetlabs.net/api/v2'\n    type: 'abs'\n    user: 'chirstopher.thorn'\n    token: '<snip>'\n  vmpooler:\n    url: 'http://vmpooler.delivery.puppetlabs.net'\n    user: 'christopher.thorn'\n    token: '<snip>'\n  ns:\n    url: 'https://nspooler-service-prod-1.delivery.puppetlabs.net/api/v1'\n    token: '997826ac3003943c75f011d9'\n    type: 'nonstandard'\n> floaty modify jerky-cookie.delivery.puppetlabs.net --lifetime 12\nerror: modify is not defined for ABS. Use --trace to view backtrace\n{code}\n\n\u00a0", "created": "2020-09-01T11:35:00.000000"}, {"author": "557058:c241032a-c552-4408-9a96-dcd5a643ff69", "body": "Oh neat, a workaround. I did indeed remove my `vmpooler` config. I'll add it back in and see if that solves this for me. 
Thanks!", "created": "2020-09-01T12:01:00.000000"}, {"author": "557058:295d7a84-a09b-4348-8961-a1e1764c190e", "body": "[~accountid:557058:291cd4a1-794b-4442-93e6-929db39e48f7] & [~accountid:557058:0391846d-6964-433c-9bfd-0cf76723122b]  - does this help at all (see workaround by [~accountid:557058:79f2fdd6-baee-43c4-91af-c7e647173c95])\n/cc [~accountid:5abd613dd4cf3c56be24b70d], [~accountid:5b9fcedc03b52466f05c4510]", "created": "2020-09-01T13:25:00.000000"}, {"author": "557058:245824fd-f598-4b92-8f0c-3fa4078a3f9b", "body": "Hey folks, has anyone had a chance to verify that workaround above?", "created": "2020-09-07T04:33:00.000000"}, {"author": "557058:c241032a-c552-4408-9a96-dcd5a643ff69", "body": "Yes, the workaround does work for me.", "created": "2020-09-07T11:57:00.000000"}, {"author": "557058:9479147e-472e-492f-9d5f-2788ee2dd8d0", "body": "The solution here, which is in the repo as PRs for review or mergeed but not tagged and deployed yet is to have ABS fallback to a vmpooler config for the operations that ABS does not support directly.\n\nSee \nexample configs https://github.com/puppetlabs/vmfloaty/pull/97\nimprovement in setting the vmpooler_fallback  https://github.com/puppetlabs/vmfloaty/pull/98\nand the main code with a typical workflow in the comment: https://github.com/puppetlabs/vmfloaty/pull/94\n\nWith the above, it will target ABS by default, and fallback on vmpooler for things like increasing the lifetime, taking snapshots etc. 
As a bonus we also fixed the bit-bar usage so that VMs checked out through ABS also show in the bit-bar.\n\nThe only gap that still exist is for the {{--reason}} modify flag for nspooler, this would need to be run and specify {{--service nspooler}}", "created": "2020-09-16T15:29:00.000000"}, {"author": "557058:295d7a84-a09b-4348-8961-a1e1764c190e", "body": "[~accountid:557058:9479147e-472e-492f-9d5f-2788ee2dd8d0] one concern I had about extending the lifetime is the reduced queue cleanup time on ABS that [~accountid:5ea206ba1a2cf60c0fab3dbc] was working on.\nNeed to be sure that if we do extend the lifetime, that the queue processor cleanup (and return processor) doesn't hose the VM despite it being extended.", "created": "2020-09-17T06:44:00.000000"}, {"author": "557058:9479147e-472e-492f-9d5f-2788ee2dd8d0", "body": "[~accountid:557058:295d7a84-a09b-4348-8961-a1e1764c190e] I'll have a look at the ticket. My understanding was that it would clean up 'pending' requests, no the ones that are 'filled' or 'allocated'", "created": "2020-09-17T08:17:00.000000"}, {"author": "557058:9479147e-472e-492f-9d5f-2788ee2dd8d0", "body": "done and released! 
High fives!", "created": "2020-09-23T11:21:00.000000"}], "components": [], "created": "2020-08-26T07:55:00.000000", "creator": "557058:295d7a84-a09b-4348-8961-a1e1764c190e", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@4754bec1"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|o042mm:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "1.0"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "01/Sep/20"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_98670206_*|*_10009_*:*_1_*:*_590298805_*|*_5_*:*_1_*:*_0_*|*_10005_*:*_1_*:*_1742576233"}], "description": "Vmfloaty's modify sub-command should use the vmpooler service by default. 
\nIf the user doesn't have a the vmpooler service defined in their vmfloaty config file they should be notified that service is required to use the modify sub-command.\n\n(original description below)\nA mechanism needs to be added to allow machines allocated under ABS to extend their lifetimes.\n\nThis also needs to be added to vmfloaty.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10069", "fixedVersions": [], "id": "10069", "issueType": "Improvement", "key": "POOLER-189", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Major", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:295d7a84-a09b-4348-8961-a1e1764c190e", "resolution": "Fixed", "resolutionDate": "2020-09-23T11:21:00.000000", "status": "Resolved", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Can't extend Lifetime for ABS allocated vmpooler machines", "timeSpent": "PT0S", "updated": "2020-09-23T11:21:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "attachments": [], "comments": [], "components": [], "created": "2020-08-05T11:51:00.000000", "creator": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@77977687"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, 
{"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Method Found", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|o03gao:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "1.0"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_11909_*|*_3_*:*_1_*:*_20675382_*|*_10009_*:*_1_*:*_1995811_*|*_6_*:*_1_*:*_0"}], "description": "On occasion the on demand vmpooler endpoint can place bad data in the requested hash. The result causes issues in pool manager because it tries to clone a VM that does not exist.\n\nThis appears to only affect pools configured on acceptance1. This is likely a bug in the alias evaluation code. 
It should not be able to produce a key that does not contain all necessary values.\n\nThe bug should be identified, resolved, and some guards added to minimize the possibility of recurrence.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10072", "fixedVersions": [], "id": "10072", "issueType": "Bug", "key": "POOLER-186", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "resolution": "Fixed", "resolutionDate": "2020-08-05T18:09:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Ondemand endpoint can place bad key in requested hash", "timeSpent": "PT0S", "updated": "2020-08-05T18:09:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "attachments": [], "comments": [], "components": [], "created": "2020-07-31T16:10:00.000000", "creator": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@1501685d"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", 
"fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Method Found", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|o03dxk:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "1.0"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_5815_*|*_3_*:*_1_*:*_262842834_*|*_6_*:*_1_*:*_0"}], "description": "While using vmpooler on demand capabilities and testing with ABS I noticed that instances were dying after 1 hour, even though the config said for auth they should have 2 for lifetime. Inspecting vm data in redis lifetime is not getting set. 
We need to set lifetime for on demand instances.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10122", "fixedVersions": [], "id": "10122", "issueType": "Bug", "key": "POOLER-185", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Major", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "resolution": "Fixed", "resolutionDate": "2020-08-03T17:11:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "On demand endpoint does not set VM lifetime", "timeSpent": "PT0S", "updated": "2020-08-03T17:11:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "attachments": [], "comments": [{"author": "557058:9479147e-472e-492f-9d5f-2788ee2dd8d0", "body": "deployed in vmpooler-dev", "created": "2020-09-18T08:50:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "I'll deploy this to vmpooler-stage in k8s to validate it is resolved so we can close this out.", "created": "2020-09-22T09:51:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "This change has been deployed to vmpooler-stage and vmpooler-prod in k8s. 
I did not see the issue recur post deployment.", "created": "2020-09-23T08:22:00.000000"}], "components": [], "created": "2020-07-24T14:04:00.000000", "creator": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@7fbc3934"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Method Found", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|o039q8:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "1.0"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "18/Sep/20"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_3441528451_*|*_10007_*:*_1_*:*_1785987649_*|*_3_*:*_1_*:*_22387725_*|*_6_*:*_1_*:*_0"}], 
"description": "In k8s when vmpooler starts up sometimes the redis pod is not yet available. When this happens the manager application logs a backtrace, but continues to run. Vmpooler should crash when the redis server is not responding to connections, or retry.\n\nThis was addressed once before, but redis connection pooling was added since. I suspect the error is no longer raised to where we would look for it in pool manager, which may be why the issue has recurred.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10028", "fixedVersions": [], "id": "10028", "issueType": "Bug", "key": "POOLER-184", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Medium (migrated)", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "resolution": "Done", "resolutionDate": "2020-09-23T08:22:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Pool manager does not quit or retry when redis connection is not available", "timeSpent": "PT0S", "updated": "2020-09-23T08:22:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [{"author": "6220db96c4d0fe0069535219", "body": "Migrating issue to a GitHub project at https://github.com/orgs/puppetlabs/projects/64/views/1?pane=issue&itemId=34966951", "created": "2023-08-03T06:45:00.000000"}], "components": [], "created": "2020-07-02T11:03:00.000000", "creator": "557058:295d7a84-a09b-4348-8961-a1e1764c190e", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": 
"com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@152201c1"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|o02w9s:"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "03/Aug/23"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_2_*:*_29442660603_*|*_6_*:*_1_*:*_0_*|*_10005_*:*_1_*:*_67914674278"}], "description": "As per discussion with [~accountid:70121:8f380ca0-fa7a-4c0e-897a-efe198b7c55c].\nThe bucket intervals for histograms in vmpooler are defined statically (see https://github.com/puppetlabs/vmpooler/blob/master/lib/vmpooler/metrics/promstats.rb#L17-L21).\n\nIt would be useful to make this flexible in the configuration file for at least some of the measures.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10019", "fixedVersions": [], "id": "10019", "issueType": "New Feature", "key": "POOLER-182", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": 
"557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:295d7a84-a09b-4348-8961-a1e1764c190e", "resolution": "Incomplete", "resolutionDate": "2023-08-03T06:45:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "03/Aug/23 6:45 AM", "summary": "Allow vmpooler Histogram buckets to be configurable", "timeSpent": "PT0S", "updated": "2023-08-03T06:45:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [{"author": "6220db96c4d0fe0069535219", "body": "Migrating issue to a GitHub project at https://github.com/orgs/puppetlabs/projects/64/views/1?pane=issue&itemId=34966940", "created": "2023-08-03T06:48:00.000000"}], "components": [], "created": "2020-07-02T11:00:00.000000", "creator": "557058:295d7a84-a09b-4348-8961-a1e1764c190e", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@242f399d"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs 
Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|o02w9k:"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "03/Aug/23"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_2_*:*_29443321167_*|*_6_*:*_1_*:*_0_*|*_10005_*:*_1_*:*_67914358475"}], "description": "This is now feasible with the re-architecture of the Rack stack.\nSo the API should be its own distinct process scaleable separately from the API or the manager.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10068", "fixedVersions": [], "id": "10068", "issueType": "New Feature", "key": "POOLER-181", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:295d7a84-a09b-4348-8961-a1e1764c190e", "resolution": "Incomplete", "resolutionDate": "2023-08-03T06:48:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "03/Aug/23 6:48 AM", "summary": "Split vmpooler dashboard into its own process (separate from the API)", "timeSpent": "PT0S", "updated": "2023-08-03T06:48:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [{"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "PR is up for this change on vmpooler.", "created": "2020-09-22T15:04:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "This has been deployed to production.", "created": "2020-10-08T14:53:00.000000"}], "components": [], "created": "2020-07-02T10:59:00.000000", "creator": "557058:295d7a84-a09b-4348-8961-a1e1764c190e", "customFieldValues": 
[{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@3a44c1b5"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|o03qj0:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "1.0"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "22/Sep/20"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_432081572_*|*_10009_*:*_1_*:*_1381746688_*|*_6_*:*_1_*:*_0_*|*_10005_*:*_1_*:*_6667464902"}], "description": "As per discussion with [~accountid:557058:d266d245-5cba-4a99-842a-d1e061513459] - need to add Health Check endpoint to vmpooler (and ABS as well) for k8s:\n * [https://kubernetesbyexample.com/healthz/]\n * [https://docs.microsoft.com/en-us/azure/architecture/patterns/health-endpoint-monitoring]\n\nThis endpoint should check:\n * Redis 
connection\n * VSphere connection\n * Last time a vm was cloned", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10114", "fixedVersions": [], "id": "10114", "issueType": "New Feature", "key": "POOLER-180", "labels": ["mob"], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:295d7a84-a09b-4348-8961-a1e1764c190e", "resolution": "Done", "resolutionDate": "2020-10-08T14:53:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "(MOB) Add Health Check Endpoint to vmpooler", "timeSpent": "PT0S", "updated": "2020-10-08T14:53:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "attachments": [], "comments": [{"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "Thanks to [~accountid:557058:9479147e-472e-492f-9d5f-2788ee2dd8d0] we were able to narrow this down to a vmpooler issue. ABS sent a request to ondemandvm for 2 instances. Vmpooler cloned two, but only provided one for the request, and only recorded that one was required when returning status.", "created": "2020-07-02T18:20:00.000000"}, {"author": "557058:9479147e-472e-492f-9d5f-2788ee2dd8d0", "body": "One of the request that failed with this issue for centos-7-x86_64 had the redis entry for {code}\nvmpooler__odrequest__c6538822-678a-47d4-9cf8-3e3f19a852b1\n{code}\n\n{code}\n centos-5-x86_64:centos-5-x86_64:1,centos-5-i386:centos-5-i386:1,centos-6-i386:centos-6-i386:1,redhat-8-x86_64:redhat-8-x86_64-pixa4:1,centos-7-x86_64:centos-7-x86_64-pixa4:1,centos-7-x86_64:centos-7-x86_64:1,centos-6-x86_64:centos-6-x86_64:1\n{code}\n\nAs you see the second member of the platform is different. 
I dont fully appriociate why this is the case, but I think as a result when we build the result for the client in the code here https://github.com/puppetlabs/vmpooler/blob/4ecd5dea5166c6743e1dbcac09c201afbbceef8f/lib/vmpooler/api/v1.rb#L982-L999\n\n{{result[platform_alias]}} would get run twice and replace each other instead we should merge it if there is already data there.", "created": "2020-07-03T15:19:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "I believe the issue is within check_ondemand_vm. Specifically, it looks like we were not accounting for the possibility of two instances with the same platform alias when checking ondemand requests. This would mean we would only show the data for the last instance group in the alias when pending, and when showing the status of the request, which in turn causes the wrong number of instances to be returned. I've put up a PR that addresses this issue in testing.", "created": "2020-07-08T10:20:00.000000"}], "components": [], "created": "2020-07-01T18:09:00.000000", "creator": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@2b62c8b6"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": 
"com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Method Found", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|o02ve8:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "1.0"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "03/Jul/20"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_87135583_*|*_3_*:*_1_*:*_489477981_*|*_10009_*:*_1_*:*_14035019_*|*_6_*:*_1_*:*_0"}], "description": "Vmpooler does not consistently allocate the correct number of instances via ondemandvm. Sometimes it will return a single instance when two are requested for a platform. The issue is intermittent and takes a few attempts to reproduce. We were able to reproduce it from ABS. 
It appeared that the correct number of instances were cloned, but the ondemandvm endpoint displayed incorrect data in response to the get requested, thinking that only one instance was required when two were requested.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10121", "fixedVersions": [], "id": "10121", "issueType": "Bug", "key": "POOLER-183", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "resolution": "Fixed", "resolutionDate": "2020-07-08T14:13:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "vmpooler does not consistently allocate the correct number of instances via ondemandvm endpoint", "timeSpent": "PT0S", "updated": "2020-07-08T14:13:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "attachments": [], "comments": [{"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "Here is the PR where I updated this to 8 hours. 
It has been deployed to production.\n\nhttps://github.com/puppetlabs/cinext-docs/pull/56", "created": "2020-06-29T09:02:00.000000"}], "components": [], "created": "2020-06-25T10:29:00.000000", "creator": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@3ea753d6"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|o02qeo:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "1.0"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_6556_*|*_3_*:*_1_*:*_340376704_*|*_10009_*:*_1_*:*_217596908_*|*_6_*:*_1_*:*_0"}], "description": "Vmpooler-cinext has a default ready_ttl value of 60. This setting used to have a default of 0, which was unreasonable. However, 60 is too aggressive for our environment. 
This ticket serves to track updating the default ready_ttl to 8 hours.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10024", "fixedVersions": [], "id": "10024", "issueType": "Improvement", "key": "POOLER-179", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "resolution": "Fixed", "resolutionDate": "2020-07-01T21:28:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Increase vmpooler ready_ttl from default", "timeSpent": "PT0S", "updated": "2020-07-01T21:28:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:295d7a84-a09b-4348-8961-a1e1764c190e", "attachments": [], "comments": [{"author": "557058:295d7a84-a09b-4348-8961-a1e1764c190e", "body": "This turned out to be a trivial implementation, so adding it as its useful to get it in before the stats are initiated.", "created": "2020-06-25T12:47:00.000000"}], "components": [], "created": "2020-06-23T10:10:00.000000", "creator": "557058:295d7a84-a09b-4348-8961-a1e1764c190e", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@123a07cb"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": 
"com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hyk9sv:044i"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "1.0"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_3061904_*|*_10007_*:*_1_*:*_65664909_*|*_3_*:*_1_*:*_492365734_*|*_10009_*:*_1_*:*_2671691_*|*_5_*:*_1_*:*_0_*|*_10005_*:*_1_*:*_179201412"}], "description": "The same set of stats are iniatialised for both the vmpooler api and pooler manager, despite some of them being specific to one of the sub-systems.\n\n\u00a0\n\nAdd categorisation to the stat setup to allow the stats only be initialised for whichever sub-system uses them.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10183", "fixedVersions": [], "id": "10183", "issueType": "New Feature", "key": "POOLER-178", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:295d7a84-a09b-4348-8961-a1e1764c190e", "resolution": "Fixed", "resolutionDate": "2020-07-02T00:33:00.000000", "status": "Resolved", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Target Prom Stats correctly for API and Pooler Manager", 
"timeSpent": "PT0S", "updated": "2020-07-02T00:33:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:295d7a84-a09b-4348-8961-a1e1764c190e", "attachments": [], "comments": [{"author": "557058:d266d245-5cba-4a99-842a-d1e061513459", "body": "As an initial stop-gap we can use a filter on our Prometheus server to drop these metrics so that they don't cost us money. Beyond that, some suggestions before we start coding a solution to this:\n * Let's ask our Grafana peeps if they have suggestions on how best to deal with scenarios like that\n * Let's ask in [https://kubernetes.slack.com/archives/CD0T3TQUR] (#prometheus) for suggestions on how others deal with this\n\nFinally, an honest question: does each vm need to be an api path or should we look at making the dns name of the vm a parameter to the vm endpoint? I fully understand that the answer to this could be no but we can't do anything about it until we make a v2 api. I am just curious.", "created": "2020-06-17T06:39:00.000000"}, {"author": "557058:295d7a84-a09b-4348-8961-a1e1764c190e", "body": "I've done a bit of looking up and the example suggests a way to do it - see [https://github.com/prometheus/client_ruby/tree/master/examples/rack]\n\nAnd specifically the reference to: [https://github.com/prometheus/client_ruby/blob/982fe2e3c37e2940d281573c7689224152dd791f/lib/prometheus/middleware/collector.rb#L97-L101]\n\nSo going to try a quick POC of copying this module and adding an extra path check for /vm/:hostname to see if that filters the paths adequately.", "created": "2020-06-17T12:11:00.000000"}, {"author": "557058:295d7a84-a09b-4348-8961-a1e1764c190e", "body": "Taking this into current sprint as have worked out a straightforward way of addressing this and cleaning up the stats we are gathering.", "created": "2020-06-19T05:33:00.000000"}], "components": [], "created": "2020-06-16T13:45:00.000000", "creator": "557058:295d7a84-a09b-4348-8961-a1e1764c190e", "customFieldValues": 
[{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@60ea9875"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hyk9sv:045"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "2.0"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "17/Jun/20"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_229661808_*|*_10007_*:*_1_*:*_645951328_*|*_3_*:*_1_*:*_456551319_*|*_10009_*:*_1_*:*_2656378_*|*_5_*:*_1_*:*_0"}], "description": "The Prometheus Collector Middleware collects stats for every unique API path thru the vmpooler API.\n\nSo for example, each Delete operation has the unique VM name and will generate 10+ stat lines per delete. The same applies to any VM operation (e.g. 
modify).\n\nSuggest we make our own copy of the collector and add filtering for certain paths.\n\nSame output for 3 delete operations:\n{noformat}\nvmpooler_http_requests_total{code=\"200\",method=\"delete\",path=\"/api/v1/vm/drunken-tilling.delivery.puppetlabs.net\"} 1.0\nvmpooler_http_requests_total{code=\"200\",method=\"delete\",path=\"/api/v1/vm/olde-writing.delivery.puppetlabs.net\"} 1.0\nvmpooler_http_requests_total{code=\"200\",method=\"delete\",path=\"/api/v1/vm/horrid-scenario.delivery.puppetlabs.net\"} 1.0\nvmpooler_http_requests_total{code=\"200\",method=\"delete\",path=\"/api/v1/vm/cynical-climate.delivery.puppetlabs.net\"} 1.0\n\nvmpooler_http_request_duration_seconds_bucket{method=\"delete\",path=\"/api/v1/vm/drunken-tilling.delivery.puppetlabs.net\",le=\"0.005\"} 0.0\nvmpooler_http_request_duration_seconds_bucket{method=\"delete\",path=\"/api/v1/vm/drunken-tilling.delivery.puppetlabs.net\",le=\"0.01\"} 0.0\nvmpooler_http_request_duration_seconds_bucket{method=\"delete\",path=\"/api/v1/vm/drunken-tilling.delivery.puppetlabs.net\",le=\"0.025\"} 1.0\nvmpooler_http_request_duration_seconds_bucket{method=\"delete\",path=\"/api/v1/vm/drunken-tilling.delivery.puppetlabs.net\",le=\"0.05\"} 1.0\nvmpooler_http_request_duration_seconds_bucket{method=\"delete\",path=\"/api/v1/vm/drunken-tilling.delivery.puppetlabs.net\",le=\"0.1\"} 1.0\nvmpooler_http_request_duration_seconds_bucket{method=\"delete\",path=\"/api/v1/vm/drunken-tilling.delivery.puppetlabs.net\",le=\"0.25\"} 1.0\nvmpooler_http_request_duration_seconds_bucket{method=\"delete\",path=\"/api/v1/vm/drunken-tilling.delivery.puppetlabs.net\",le=\"0.5\"} 1.0\nvmpooler_http_request_duration_seconds_bucket{method=\"delete\",path=\"/api/v1/vm/drunken-tilling.delivery.puppetlabs.net\",le=\"1\"} 1.0\nvmpooler_http_request_duration_seconds_bucket{method=\"delete\",path=\"/api/v1/vm/drunken-tilling.delivery.puppetlabs.net\",le=\"2.5\"} 
1.0\nvmpooler_http_request_duration_seconds_bucket{method=\"delete\",path=\"/api/v1/vm/drunken-tilling.delivery.puppetlabs.net\",le=\"5\"} 1.0\nvmpooler_http_request_duration_seconds_bucket{method=\"delete\",path=\"/api/v1/vm/drunken-tilling.delivery.puppetlabs.net\",le=\"10\"} 1.0\nvmpooler_http_request_duration_seconds_bucket{method=\"delete\",path=\"/api/v1/vm/drunken-tilling.delivery.puppetlabs.net\",le=\"+Inf\"} 1.0\nvmpooler_http_request_duration_seconds_sum{method=\"delete\",path=\"/api/v1/vm/drunken-tilling.delivery.puppetlabs.net\"} 0.012309300000197254\nvmpooler_http_request_duration_seconds_count{method=\"delete\",path=\"/api/v1/vm/drunken-tilling.delivery.puppetlabs.net\"} 1.0\nvmpooler_http_request_duration_seconds_bucket{method=\"delete\",path=\"/api/v1/vm/olde-writing.delivery.puppetlabs.net\",le=\"0.005\"} 0.0\nvmpooler_http_request_duration_seconds_bucket{method=\"delete\",path=\"/api/v1/vm/olde-writing.delivery.puppetlabs.net\",le=\"0.01\"} 0.0\nvmpooler_http_request_duration_seconds_bucket{method=\"delete\",path=\"/api/v1/vm/olde-writing.delivery.puppetlabs.net\",le=\"0.025\"} 1.0\nvmpooler_http_request_duration_seconds_bucket{method=\"delete\",path=\"/api/v1/vm/olde-writing.delivery.puppetlabs.net\",le=\"0.05\"} 1.0\nvmpooler_http_request_duration_seconds_bucket{method=\"delete\",path=\"/api/v1/vm/olde-writing.delivery.puppetlabs.net\",le=\"0.1\"} 1.0\nvmpooler_http_request_duration_seconds_bucket{method=\"delete\",path=\"/api/v1/vm/olde-writing.delivery.puppetlabs.net\",le=\"0.25\"} 1.0\nvmpooler_http_request_duration_seconds_bucket{method=\"delete\",path=\"/api/v1/vm/olde-writing.delivery.puppetlabs.net\",le=\"0.5\"} 1.0\nvmpooler_http_request_duration_seconds_bucket{method=\"delete\",path=\"/api/v1/vm/olde-writing.delivery.puppetlabs.net\",le=\"1\"} 1.0\nvmpooler_http_request_duration_seconds_bucket{method=\"delete\",path=\"/api/v1/vm/olde-writing.delivery.puppetlabs.net\",le=\"2.5\"} 
1.0\nvmpooler_http_request_duration_seconds_bucket{method=\"delete\",path=\"/api/v1/vm/olde-writing.delivery.puppetlabs.net\",le=\"5\"} 1.0\nvmpooler_http_request_duration_seconds_bucket{method=\"delete\",path=\"/api/v1/vm/olde-writing.delivery.puppetlabs.net\",le=\"10\"} 1.0\nvmpooler_http_request_duration_seconds_bucket{method=\"delete\",path=\"/api/v1/vm/olde-writing.delivery.puppetlabs.net\",le=\"+Inf\"} 1.0\nvmpooler_http_request_duration_seconds_sum{method=\"delete\",path=\"/api/v1/vm/olde-writing.delivery.puppetlabs.net\"} 0.0123888000007355\nvmpooler_http_request_duration_seconds_count{method=\"delete\",path=\"/api/v1/vm/olde-writing.delivery.puppetlabs.net\"} 1.0\nvmpooler_http_request_duration_seconds_bucket{method=\"delete\",path=\"/api/v1/vm/horrid-scenario.delivery.puppetlabs.net\",le=\"0.005\"} 0.0\nvmpooler_http_request_duration_seconds_bucket{method=\"delete\",path=\"/api/v1/vm/horrid-scenario.delivery.puppetlabs.net\",le=\"0.01\"} 0.0\nvmpooler_http_request_duration_seconds_bucket{method=\"delete\",path=\"/api/v1/vm/horrid-scenario.delivery.puppetlabs.net\",le=\"0.025\"} 1.0\nvmpooler_http_request_duration_seconds_bucket{method=\"delete\",path=\"/api/v1/vm/horrid-scenario.delivery.puppetlabs.net\",le=\"0.05\"} 1.0\nvmpooler_http_request_duration_seconds_bucket{method=\"delete\",path=\"/api/v1/vm/horrid-scenario.delivery.puppetlabs.net\",le=\"0.1\"} 1.0\nvmpooler_http_request_duration_seconds_bucket{method=\"delete\",path=\"/api/v1/vm/horrid-scenario.delivery.puppetlabs.net\",le=\"0.25\"} 1.0\nvmpooler_http_request_duration_seconds_bucket{method=\"delete\",path=\"/api/v1/vm/horrid-scenario.delivery.puppetlabs.net\",le=\"0.5\"} 1.0\nvmpooler_http_request_duration_seconds_bucket{method=\"delete\",path=\"/api/v1/vm/horrid-scenario.delivery.puppetlabs.net\",le=\"1\"} 1.0\nvmpooler_http_request_duration_seconds_bucket{method=\"delete\",path=\"/api/v1/vm/horrid-scenario.delivery.puppetlabs.net\",le=\"2.5\"} 
1.0\nvmpooler_http_request_duration_seconds_bucket{method=\"delete\",path=\"/api/v1/vm/horrid-scenario.delivery.puppetlabs.net\",le=\"5\"} 1.0\nvmpooler_http_request_duration_seconds_bucket{method=\"delete\",path=\"/api/v1/vm/horrid-scenario.delivery.puppetlabs.net\",le=\"10\"} 1.0\nvmpooler_http_request_duration_seconds_bucket{method=\"delete\",path=\"/api/v1/vm/horrid-scenario.delivery.puppetlabs.net\",le=\"+Inf\"} 1.0\nvmpooler_http_request_duration_seconds_sum{method=\"delete\",path=\"/api/v1/vm/horrid-scenario.delivery.puppetlabs.net\"} 0.012892799999463023\nvmpooler_http_request_duration_seconds_count{method=\"delete\",path=\"/api/v1/vm/horrid-scenario.delivery.puppetlabs.net\"} 1.0\nvmpooler_http_request_duration_seconds_bucket{method=\"delete\",path=\"/api/v1/vm/cynical-climate.delivery.puppetlabs.net\",le=\"0.005\"} 0.0\nvmpooler_http_request_duration_seconds_bucket{method=\"delete\",path=\"/api/v1/vm/cynical-climate.delivery.puppetlabs.net\",le=\"0.01\"} 0.0\nvmpooler_http_request_duration_seconds_bucket{method=\"delete\",path=\"/api/v1/vm/cynical-climate.delivery.puppetlabs.net\",le=\"0.025\"} 1.0\nvmpooler_http_request_duration_seconds_bucket{method=\"delete\",path=\"/api/v1/vm/cynical-climate.delivery.puppetlabs.net\",le=\"0.05\"} 1.0\nvmpooler_http_request_duration_seconds_bucket{method=\"delete\",path=\"/api/v1/vm/cynical-climate.delivery.puppetlabs.net\",le=\"0.1\"} 1.0\nvmpooler_http_request_duration_seconds_bucket{method=\"delete\",path=\"/api/v1/vm/cynical-climate.delivery.puppetlabs.net\",le=\"0.25\"} 1.0\nvmpooler_http_request_duration_seconds_bucket{method=\"delete\",path=\"/api/v1/vm/cynical-climate.delivery.puppetlabs.net\",le=\"0.5\"} 1.0\nvmpooler_http_request_duration_seconds_bucket{method=\"delete\",path=\"/api/v1/vm/cynical-climate.delivery.puppetlabs.net\",le=\"1\"} 1.0\nvmpooler_http_request_duration_seconds_bucket{method=\"delete\",path=\"/api/v1/vm/cynical-climate.delivery.puppetlabs.net\",le=\"2.5\"} 
1.0\nvmpooler_http_request_duration_seconds_bucket{method=\"delete\",path=\"/api/v1/vm/cynical-climate.delivery.puppetlabs.net\",le=\"5\"} 1.0\nvmpooler_http_request_duration_seconds_bucket{method=\"delete\",path=\"/api/v1/vm/cynical-climate.delivery.puppetlabs.net\",le=\"10\"} 1.0\nvmpooler_http_request_duration_seconds_bucket{method=\"delete\",path=\"/api/v1/vm/cynical-climate.delivery.puppetlabs.net\",le=\"+Inf\"} 1.0\nvmpooler_http_request_duration_seconds_sum{method=\"delete\",path=\"/api/v1/vm/cynical-climate.delivery.puppetlabs.net\"} 0.011117999999441963\nvmpooler_http_request_duration_seconds_count{method=\"delete\",path=\"/api/v1/vm/cynical-climate.delivery.puppetlabs.net\"} 1.0\n{noformat}", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10018", "fixedVersions": [], "id": "10018", "issueType": "New Feature", "key": "POOLER-177", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:295d7a84-a09b-4348-8961-a1e1764c190e", "resolution": "Fixed", "resolutionDate": "2020-07-02T00:32:00.000000", "status": "Resolved", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Prometheus API collector stat count issue with vmpooler delete/modify operations", "timeSpent": "PT0S", "updated": "2020-07-02T00:32:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "6220db96c4d0fe0069535219", "attachments": [], "comments": [{"author": "6220db96c4d0fe0069535219", "body": "VMPooler 1.1.0 containing this change has been released and deployed.", "created": "2021-08-18T07:25:00.000000"}], "components": [], "created": "2020-06-16T13:40:00.000000", "creator": "557058:295d7a84-a09b-4348-8961-a1e1764c190e", "customFieldValues": [{"fieldName": 
"Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@64d74e5d"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|o09phu:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "1.0"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "18/Aug/21"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_595239059_*|*_3_*:*_1_*:*_1477268169_*|*_10009_*:*_1_*:*_496424900_*|*_6_*:*_1_*:*_0_*|*_10005_*:*_1_*:*_34387746096"}], "description": "As per discussion with [~accountid:557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72] - it would be cool to collect user stats at the start of the vmpooler allocate phase as well as when it is destroyed.\nCould this be done with another label - \"Operation=(Allocate|Destroy)\" in the metric ?", "epicLinkSummary": null, "estimate": "PT0S", 
"externalId": "10113", "fixedVersions": [], "id": "10113", "issueType": "New Feature", "key": "POOLER-176", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:295d7a84-a09b-4348-8961-a1e1764c190e", "resolution": "Done", "resolutionDate": "2021-08-18T07:25:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Collect vmpooler/user stats at allocation as well as destroy", "timeSpent": "PT0S", "updated": "2021-08-18T07:25:00.000000", "votes": "1", "watchers": []}, {"affectedVersions": [], "assignee": "557058:9479147e-472e-492f-9d5f-2788ee2dd8d0", "attachments": [], "comments": [], "components": [], "created": "2020-06-10T11:30:00.000000", "creator": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@40f3ffa0"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA 
Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|o02hl4:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "3.0"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_77381197_*|*_3_*:*_1_*:*_84651071_*|*_10009_*:*_1_*:*_262713127_*|*_6_*:*_1_*:*_0"}], "description": "POOLER-158\u00a0 introduced a capability\u00a0 to\u00a0 provision\u00a0 instances on demand. The change became quite large so it was eventually merged with some standing feedback still open requesting improvements that reduce duplication of code. This ticket serves to track following up on these improvements and implementing the suggested changes.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10091", "fixedVersions": [], "id": "10091", "issueType": "Improvement", "key": "POOLER-174", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "resolution": "Fixed", "resolutionDate": "2020-06-15T09:29:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Reduce duplicate of on demand code introduced in POOLER-158", "timeSpent": "PT0S", "updated": "2020-06-15T09:29:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "attachments": [], "comments": [{"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "I believe this is happening because we are 
overloading graphite with attempts to connect to send metrics, mostly because of the addition of redis connection pooling, which is quite noisy with metrics. In order to support this increase in frequency it will be necessary to add something like statsd to send the metrics to graphite in batches. We don't have a statsd server any longer, so one will need to be deployed to support this. If I cannot get this done today I will revert the changes in vmpooler so this issue does not recur.", "created": "2020-06-08T11:51:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "I discovered graphite-be2-prod is configured to have statsd running. Some of the required components were missing so I added them and pointed vmpooler to it. This appears to have resolved the errors. We will have to give it some bake time to validate that those errors were related to the issues.", "created": "2020-06-09T17:25:00.000000"}, {"author": "557058:9479147e-472e-492f-9d5f-2788ee2dd8d0", "body": "seems a recent update also inadvertently put us back on jruby 2.9.11 that has issues with stackoverflows\nWe are now trying the setting -Xinvokedynamic.yield=false\nfor Jruby", "created": "2020-06-10T15:39:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "I had been testing with 9.2.11 and it was working for me consistently in test instances with 9.2.11.1 after being able to reproduce the issues with 9.2.11.0. I thought that it would be okay to switch back to 9.2.11.1 as a result. 
It looks like it did end up bundled in with the on demand VM changes and this latest version, so until recently we were running 9.2.9.x.\n\nIf we find that the invokedynamic.yield=false does not meaningfully improve this situation, and the errors recur, then we should switch back to 9.2.9.x again.", "created": "2020-06-10T15:52:00.000000"}, {"author": "557058:9479147e-472e-492f-9d5f-2788ee2dd8d0", "body": "was this added manually or also in the marathon json files?", "created": "2020-06-15T10:25:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "I made the change in the cinext-docs repo on the vmpooler manager application, which appears to be the only one that has been affected. I deployed it via the API and put up a PR to cinext-docs for the environment variable addition.\n\n[https://github.com/puppetlabs/cinext-docs/pull/55]", "created": "2020-06-15T10:31:00.000000"}, {"author": "557058:9479147e-472e-492f-9d5f-2788ee2dd8d0", "body": "Great thanks. I'm just thinking, we should have some high level note in the docs for the FOSS users of vmpooler?", "created": "2020-06-15T12:25:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "[~accountid:557058:9479147e-472e-492f-9d5f-2788ee2dd8d0] yes that is a good point. I have added a PR to vmpooler that adds some information to the readme under configuration. 
Do you think that is sufficient, or were you hoping to see this information presented somewhere else, or in greater detail?\n\nhttps://github.com/puppetlabs/vmpooler/pull/384", "created": "2020-06-15T18:13:00.000000"}], "components": [], "created": "2020-06-08T07:16:00.000000", "creator": "557058:d266d245-5cba-4a99-842a-d1e061513459", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@74eeeefb"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Method Found", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|o02f3s:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "1.0"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "08/Jun/20"}, {"fieldName": "[CHART] Time in Status", "fieldType": 
"com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_16482387_*|*_3_*:*_1_*:*_106381406_*|*_10009_*:*_1_*:*_1114361866_*|*_5_*:*_1_*:*_0"}], "description": "On June 5th [~accountid:557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72] found that worker threads were dying. He restarted the manager app in Marathon and things started functioning again. The same thing happened this morning (June 8th).", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10017", "fixedVersions": [], "id": "10017", "issueType": "Bug", "key": "POOLER-172", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Critical", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:d266d245-5cba-4a99-842a-d1e061513459", "resolution": "Fixed", "resolutionDate": "2020-06-22T14:57:00.000000", "status": "Resolved", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "vmpooler worker threads dying", "timeSpent": "PT0S", "updated": "2020-06-22T14:57:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "5ea206ba1a2cf60c0fab3dbc", "attachments": [], "comments": [], "components": [], "created": "2020-05-28T11:41:00.000000", "creator": "5ea206ba1a2cf60c0fab3dbc", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@4602c1d"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, 
{"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|o0244c:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "1.0"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_32107_*|*_3_*:*_1_*:*_5865879_*|*_10009_*:*_1_*:*_90533439_*|*_6_*:*_1_*:*_0_*|*_10011_*:*_1_*:*_7050"}], "description": "VMPooler currently only supports a single user object as defined in its configuration file (\"uid\"). 
However, in order for it to allow authentication through ldap service accounts, we also need it to support the \"cn\" user object.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10016", "fixedVersions": [], "id": "10016", "issueType": "Improvement", "key": "POOLER-171", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "5ea206ba1a2cf60c0fab3dbc", "resolution": "Done", "resolutionDate": "2020-05-29T14:28:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Enable support for multiple ldap user objects", "timeSpent": "PT0S", "updated": "2020-05-29T14:28:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:295d7a84-a09b-4348-8961-a1e1764c190e", "attachments": [], "comments": [], "components": [], "created": "2020-05-27T11:34:00.000000", "creator": "557058:295d7a84-a09b-4348-8961-a1e1764c190e", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@5bb1ddce"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", 
"value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hyk9sv:044"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "0.0"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_1215753869_*|*_10007_*:*_1_*:*_652144534_*|*_3_*:*_1_*:*_1200103110_*|*_10009_*:*_1_*:*_2674639_*|*_5_*:*_1_*:*_0"}], "description": "The current statsd measures that we gathering in pooler_manager.rb on vmpooler (see https://github.com/puppetlabs/vmpooler/blob/master/lib/vmpooler/pool_manager.rb#L380-L436) are going to cause issues with Prometheus.\n\nThis task to figure out prometheus friendly metrics and imlement them as a follow up to POOLER-160.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10090", "fixedVersions": [], "id": "10090", "issueType": "New Feature", "key": "POOLER-170", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:295d7a84-a09b-4348-8961-a1e1764c190e", "resolution": "Fixed", "resolutionDate": "2020-07-02T00:32:00.000000", "status": "Resolved", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Sort out how we gather vmpooler user stats on Prometheus", "timeSpent": "PT0S", "updated": "2020-07-02T00:32:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": 
[{"author": "6220db96c4d0fe0069535219", "body": "Migrating issue to a GitHub project at https://github.com/orgs/puppetlabs/projects/64/views/1?pane=issue&itemId=34966933", "created": "2023-08-03T06:50:00.000000"}], "components": [], "created": "2020-05-27T11:22:00.000000", "creator": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@72cb25a9"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hyk9sv:04090000c"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "3.0"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "03/Aug/23"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_2_*:*_29011478965_*|*_6_*:*_1_*:*_0_*|*_10005_*:*_1_*:*_71455381969"}], "description": "vmpooler allow 
unauthenticated VM requests, even when auth is enabled. The application should support a configuration where all requests require authentication.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10120", "fixedVersions": [], "id": "10120", "issueType": "Improvement", "key": "POOLER-169", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "resolution": "Incomplete", "resolutionDate": "2023-08-03T06:50:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "03/Aug/23 6:50 AM", "summary": "Allow vmpooler to require authentication for all requests", "timeSpent": "PT0S", "updated": "2023-08-03T06:50:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "5ea206ba1a2cf60c0fab3dbc", "attachments": [], "comments": [{"author": "5ea206ba1a2cf60c0fab3dbc", "body": "This capability has been tested and validated locally and works as expected. 
A draft PR can be found [here|https://github.com/puppetlabs/vmpooler/pull/382] for its integration into vmpooler.", "created": "2020-06-10T11:53:00.000000"}], "components": [], "created": "2020-04-15T10:24:00.000000", "creator": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@5ce8f4b8"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": ["557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72"]}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hyk9sv:046"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "5.0"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "10/Jun/20"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_6591_*|*_3_*:*_2_*:*_691317889_*|*_10009_*:*_1_*:*_976302744_*|*_6_*:*_1_*:*_0_*|*_10005_*:*_2_*:*_4315479156"}], 
"description": "Vmpooler should be able to configure the network for a machine at clone time. Without this addition it is necessary to have a template configured with the target network for each network that we are supporting. As we plan on increasing the volume of vmpooler SUTs we plan to outgrow the IP space allocated at pix for acceptance2 and 4. To support additional IP spaces it will be necessary to have this capability. This ticket serves to validate that the function works as expected, and integrate the capability into vmpooler.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10174", "fixedVersions": [], "id": "10174", "issueType": "New Feature", "key": "POOLER-167", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "High (migrated)", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "resolution": "Done", "resolutionDate": "2020-06-23T16:23:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Network should be configurable per pool", "timeSpent": "PT0S", "updated": "2020-06-24T09:17:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:9479147e-472e-492f-9d5f-2788ee2dd8d0", "attachments": [], "comments": [{"author": "557058:9479147e-472e-492f-9d5f-2788ee2dd8d0", "body": "I assume querying DNS is a first step, so we can stop if the DNS is not populated.\n\nBut in the case we do get an IP back, I assume we would have to compare that to the IPs as seen by vmware and make sure it is one of them", "created": "2020-05-27T15:53:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "I think we can handle this in the name selection for vmpooler, before we ever create an instance. 
If there is a DNS record for the name we should emit a metric to represent this and report the name as unusable. Right now we perform a similar check, but we only look to redis.", "created": "2020-05-27T16:46:00.000000"}, {"author": "557058:9479147e-472e-492f-9d5f-2788ee2dd8d0", "body": "oh I get it now, we could do that check even before the VM is created. I was going to check the DNS upon checking out a VM, but it makes more sense to check before we even create it, I'll look into the anchor point for that.", "created": "2020-05-27T16:56:00.000000"}, {"author": "557058:9479147e-472e-492f-9d5f-2788ee2dd8d0", "body": "I\u2019ve added a log parser for stale DNS logs to Gene\u2019s vmpooler playground here: https://puppet.grafana.net/d/WWHy_ZBZk/vmpooler-playground?orgId=1&refresh=30s&from=now-7d&to=now", "created": "2020-06-05T13:47:00.000000"}], "components": ["VM Pooler"], "created": "2020-04-07T14:53:00.000000", "creator": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@21e9ad0c"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": 
"com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hyk9sv:0409i"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "1.0"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "27/May/20"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_7214_*|*_10007_*:*_1_*:*_610220401_*|*_5_*:*_1_*:*_0_*|*_10005_*:*_1_*:*_4483391764"}], "description": "DIO-580 identified instances where vmpooler instances were reported to have reached a ready state, while something else entirely was responding to its socket requests. Vmpooler should offer a more defensive mechanism to reduce the likelihood of this occurring. If DNS is queried at provisioning time then the application could avoid creating instances that would conflict with existing DNS entries.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10065", "fixedVersions": [], "id": "10065", "issueType": "Improvement", "key": "POOLER-166", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Major", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "resolution": "Fixed", "resolutionDate": "2020-06-05T13:47:00.000000", "status": "Resolved", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Vmpooler should check DNS to ensure a record does not exist before VM creation", "timeSpent": "PT0S", "updated": "2020-06-05T13:47:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": 
"557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "attachments": [], "comments": [], "components": [], "created": "2020-04-07T10:33:00.000000", "creator": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@25f24ea7"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Method Found", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|o0176g:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "3.0"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_8586_*|*_10009_*:*_1_*:*_12594597_*|*_6_*:*_1_*:*_0"}], "description": "Vmpooler fails to remove pools that are no longer configured when remove_unconfigured_pools is enabled. 
This functionality should get fixed so vmpooler can remove pools that are no longer configured for use.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10015", "fixedVersions": [], "id": "10015", "issueType": "Bug", "key": "POOLER-165", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "resolution": "Fixed", "resolutionDate": "2020-04-07T14:03:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Vmpooler purge_unconfigured_folders fails to remove pools that are no longer configured", "timeSpent": "PT0S", "updated": "2020-04-07T14:03:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [{"author": "557058:d24f1e0b-0b31-405d-8707-8ecd214210d6", "body": "As I don't know ruby I initially like to know if this is even possible? What requirements would vRA need to fulfill to be able to be a provider? Browsing the code I, for example, saw references to vm snapshots. We can't create vm snapshots through vRA (maybe vRA supports this, but we're not allowed to do it). Is that a blocker?", "created": "2020-03-26T09:59:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "I believe it is possible to add a vRA provider to vmpooler. I'm not sure if this is the most simple route to go. It may be more straightforward to provision in vRA directly, or via beaker.\n\nThere have been some efforts to make it easier to add additional providers to vmpooler. 
We (puppet) have only produced and consumed the vsphere provider.\n\nThis issue requested adding support for additional providers.\n\n[https://github.com/puppetlabs/vmpooler/issues/181]\n\nThis PR added the capability to load additional providers from a separate gem.\n\n[https://github.com/puppetlabs/vmpooler/pull/263]\n\nWith those capabilities you could create a discrete vRA gem. It also sounded like this is something that logicminds from PR #263 linked above had started to consider, but I'm not sure if it was ever implemented. I could not find a gem for this when looking around briefly. You may consider asking them if this work resulted in any fruit.\n\nIt's unclear to me at a glance if the vRA clients have sufficient functionality to replicate what vsphere is doing. If not, it could be necessary to make code changes in vmpooler in pool_manager to make the provider fully functional.\n\nThis is not something that we would tackle or implement in DIO, but I'm happy to answer questions about it if you think it's something you would want to explore.", "created": "2020-03-26T10:27:00.000000"}, {"author": "6220db96c4d0fe0069535219", "body": "Migrating issue to a GitHub project at https://github.com/orgs/puppetlabs/projects/64/views/1?pane=issue&itemId=34966919", "created": "2023-08-03T06:41:00.000000"}], "components": ["VM Pooler"], "created": "2020-03-26T09:55:00.000000", "creator": "557058:d24f1e0b-0b31-405d-8707-8ecd214210d6", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@6794da2c"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": 
"com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|o00npk:"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "26/Mar/20"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_105828378072_*|*_6_*:*_1_*:*_0"}], "description": "We can't use vsphere directly for provisioning vms but we need to use vRA (vRealize Automation). 
Reading the docs I suppose a provider impl is needed for this.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10150", "fixedVersions": [], "id": "10150", "issueType": "New Feature", "key": "POOLER-164", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:d24f1e0b-0b31-405d-8707-8ecd214210d6", "resolution": "Incomplete", "resolutionDate": "2023-08-03T06:41:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "03/Aug/23 6:41 AM", "summary": "vRA provider", "timeSpent": "PT0S", "updated": "2023-08-03T06:41:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [{"author": "6220db96c4d0fe0069535219", "body": "Migrating issue to a GitHub project at https://github.com/orgs/puppetlabs/projects/64/views/1?pane=issue&itemId=34966912", "created": "2023-08-03T06:40:00.000000"}], "components": [], "created": "2020-03-24T15:07:00.000000", "creator": "557058:0b16fa29-44bc-4e11-82b0-a346967ad808", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@7c42bf52"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": 
"Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|o00m9c:"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "03/Aug/23"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_105982395333_*|*_6_*:*_1_*:*_0"}], "description": "Currently, if a VM locks up, a user has to contact someone with vsphere permissions to restart the machine. It seems like it would be good if users had some power management functions on VMs they've checked out.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10014", "fixedVersions": [], "id": "10014", "issueType": "New Feature", "key": "POOLER-163", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:0b16fa29-44bc-4e11-82b0-a346967ad808", "resolution": "Incomplete", "resolutionDate": "2023-08-03T06:40:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "03/Aug/23 6:40 AM", "summary": "Add power management endpoints", "timeSpent": "PT0S", "updated": "2023-08-03T06:40:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [{"author": "6220db96c4d0fe0069535219", "body": "Migrating issue to a GitHub project at 
https://github.com/orgs/puppetlabs/projects/64/views/1?pane=issue&itemId=34966903", "created": "2023-08-03T06:49:00.000000"}], "components": [], "created": "2020-03-23T18:17:00.000000", "creator": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@10affc1b"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": ["557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72"]}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Method Found", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|o00lk8:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "1.0"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "03/Aug/23"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": 
"1_*:*_2_*:*_29011436545_*|*_6_*:*_1_*:*_0_*|*_10005_*:*_1_*:*_77046499195"}], "description": "When vmpooler API is starting and redis server is not available it will log messages and report backtraces, but will not exit the application, or recover. Vmpooler should force the API to exit the application, rather than continuing to run and report backtraces.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10067", "fixedVersions": [], "id": "10067", "issueType": "Bug", "key": "POOLER-162", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "resolution": "Incomplete", "resolutionDate": "2023-08-03T06:49:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "03/Aug/23 6:49 AM", "summary": "API does not restart if redis server is unavailable at startup", "timeSpent": "PT0S", "updated": "2023-08-03T06:49:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:9479147e-472e-492f-9d5f-2788ee2dd8d0", "attachments": [], "comments": [{"author": "557058:79f2fdd6-baee-43c4-91af-c7e647173c95", "body": "[~accountid:557058:9479147e-472e-492f-9d5f-2788ee2dd8d0] is this expected behavior of the new max TTL with vmpooler?", "created": "2020-03-23T14:03:00.000000"}, {"author": "557058:9479147e-472e-492f-9d5f-2788ee2dd8d0", "body": "Yes the maximum is around 2 weeks. 
Thats 2 weeks total, either when created outright or extended to that amount, we do not allow going over that limit.", "created": "2020-04-08T17:57:00.000000"}, {"author": "557058:55b23daa-f1aa-41fd-b10d-bf49a61932da", "body": "[~accountid:557058:9479147e-472e-492f-9d5f-2788ee2dd8d0] The --lifetime flag modifies the total TTL - in the first snippet above, you can see the runtime/lifetime change from 0.01/200 to 0.01/100. In the second snippet, I try to adjust the lifetime to 335 - this should be fine, since the max TTL is 336 and --lifetime modifies total TTL, but it fails. It shouldn't take current running time into account when making that check.", "created": "2020-04-08T20:15:00.000000"}, {"author": "557058:9479147e-472e-492f-9d5f-2788ee2dd8d0", "body": "I see, that does sound like a bug, thanks for the reproduction steps above. \nWe have to check what vmpooler API floaty uses for that flag and then check the vmpooler code to take that in consideration.", "created": "2020-04-09T11:33:00.000000"}, {"author": "557058:9479147e-472e-492f-9d5f-2788ee2dd8d0", "body": "[~accountid:557058:79f2fdd6-baee-43c4-91af-c7e647173c95] can you schedule it for next sprint? 
it doesnt show up in my DIO scrum board...", "created": "2020-04-09T11:56:00.000000"}, {"author": "557058:79f2fdd6-baee-43c4-91af-c7e647173c95", "body": "Alright it is in the next sprint, when we start it if it isn't on the scrum board then I think we need to tweak something.", "created": "2020-04-09T13:23:00.000000"}], "components": [], "created": "2020-03-23T13:26:00.000000", "creator": "557058:55b23daa-f1aa-41fd-b10d-bf49a61932da", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@271f0fdf"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Method Found", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|o00l6w:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "2.0"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": 
"23/Mar/20"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_1462090538_*|*_10007_*:*_1_*:*_78568749_*|*_3_*:*_1_*:*_406997_*|*_5_*:*_1_*:*_0_*|*_10005_*:*_1_*:*_520445008"}], "description": "I am using vmfloaty to interact with vmpooler.\n\nThe {{lifetime}} flag modifies the total TTL of the vm, rather than adding to the current TTL:\n{code:java}\nmolly.waggett@molly:~ $ floaty modify rarer-moontrack --lifetime 200\nSuccessfully modified VM rarer-moontrack.\nUse `floaty list --active` to see the results.\nmolly.waggett@molly:~ $ floaty list --active\nYour VMs on vmpooler.delivery.puppetlabs.net:\n- rarer-moontrack.delivery.puppetlabs.net (centos-7-x86_64, 0.01/200 hours)\nmolly.waggett@molly:~ $ floaty modify rarer-moontrack --lifetime 100\nSuccessfully modified VM rarer-moontrack.\nUse `floaty list --active` to see the results.\nmolly.waggett@molly:~ $ floaty list --active\nYour VMs on vmpooler.delivery.puppetlabs.net:\n- rarer-moontrack.delivery.puppetlabs.net (centos-7-x86_64, 0.01/100 hours)\n{code}\n\nHowever, I am unable to modify a vm's lifetime if <current running time> + <new lifetime> exceeds the max TTL:\n{code:java}\nmolly.waggett@molly:~/puppet-projects/pe-file-sync(master-playground\ud83d\udc4c ) $ floaty list --active\nYour VMs on vmpooler.delivery.puppetlabs.net:\n- imcomplete-surf.delivery.puppetlabs.net (ubuntu-1804-x86_64-pixa4, 141.24/200 hours)\nmolly.waggett@molly:~/puppet-projects/pe-file-sync(master-playground\ud83d\udc4c ) $ floaty modify imcomplete-surf --lifetime 335\nHTTP 400: Failed to modify VMs from the pooler vm/imcomplete-surf. 
{\"ok\"=>false, \"failure\"=>[\"You provided a lifetime (335) that will extend the current lifetime past the configured maximum of 336.\"]}\n{code}\n\nI would expect my vm to be at 141/335 hours in this case.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10112", "fixedVersions": [], "id": "10112", "issueType": "Bug", "key": "POOLER-161", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:55b23daa-f1aa-41fd-b10d-bf49a61932da", "resolution": "Fixed", "resolutionDate": "2020-04-16T10:04:00.000000", "status": "Resolved", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Can't extend vm lifetime beyond (max lifetime - current running time)", "timeSpent": "PT0S", "updated": "2020-04-16T10:04:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:295d7a84-a09b-4348-8961-a1e1764c190e", "attachments": [], "comments": [], "components": [], "created": "2020-03-18T11:45:00.000000", "creator": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@661bf4aa"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": 
[]}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hyk9sv:042"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "0.0"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_12318_*|*_10007_*:*_1_*:*_652072899_*|*_3_*:*_1_*:*_4332915971_*|*_10009_*:*_2_*:*_2417584668_*|*_5_*:*_1_*:*_0_*|*_10005_*:*_1_*:*_1715472296"}], "description": "As DIO moves towards using Prometheus we want to add support to vmpooler to present metrics for Prometheus to scrape. To support this we need to add an endpoint to both vmpooler applications components to present metrics. 
Additionally, a stats handler is necessary to\u00a0 provide functionality like the statsd and graphite handlers.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10173", "fixedVersions": [], "id": "10173", "issueType": "New Feature", "key": "POOLER-160", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "resolution": "Fixed", "resolutionDate": "2020-07-02T00:32:00.000000", "status": "Resolved", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Add prometheus endpoint and stats handler to vmpooler", "timeSpent": "PT0S", "updated": "2020-07-02T00:32:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [{"author": "557058:79f2fdd6-baee-43c4-91af-c7e647173c95", "body": "Moved up the priority, when this recently happened it came very close to making a PE release slip. 
", "created": "2020-03-11T10:06:00.000000"}, {"author": "6220db96c4d0fe0069535219", "body": "Migrating issue to a GitHub project at https://github.com/orgs/puppetlabs/projects/64/views/1?pane=issue&itemId=34966895", "created": "2023-08-03T06:46:00.000000"}], "components": [], "created": "2020-03-10T15:47:00.000000", "creator": "557058:0b16fa29-44bc-4e11-82b0-a346967ad808", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Acceptance Criteria", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:textarea", "value": "* There is a config option available for setting a maximum number of VMs that can be checked out with one token\n* There is a config option available to override this behavior for a particular user (such as ABS or Jenkins)"}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@64292387"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|o011fw:"}, {"fieldName": "Story Points", "fieldType": 
"com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "3.0"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "11/Mar/20"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_2_*:*_29077157650_*|*_6_*:*_1_*:*_0_*|*_10005_*:*_1_*:*_78112828392"}], "description": "Current plan after discussing this in Brainstorming:\n* Limit VMs/user to 50\n* Add whitelist to opt-out of this behavior\n* Add Jenkins/ABS to the whitelist", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10172", "fixedVersions": [], "id": "10172", "issueType": "Improvement", "key": "POOLER-159", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Medium (migrated)", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:0b16fa29-44bc-4e11-82b0-a346967ad808", "resolution": "Incomplete", "resolutionDate": "2023-08-03T06:46:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "03/Aug/23 6:46 AM", "summary": "Add a limit on how many VMs a user can check out at one time", "timeSpent": "PT0S", "updated": "2023-08-03T06:46:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "attachments": [], "comments": [{"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "This capability is working now on the branch pooler_158 on my fork. Here's the comparison [https://github.com/puppetlabs/vmpooler/compare/master...mattkirby:pooler_158?expand=1#]\n\nIt now works properly in my testing, including with the addition of redis connection pooling. 
However, I still need to tackle both fixing tests that I have broken with my additions and changes, as well as implementing tests for the new functionality. It works well and provisions quickly.\n\nYou can hack on it by running the docker compose setup with the pooler_158 branch.\n\nI did add some reporting in the get ondemandvm/$request_id call to return the status of a request fulfillment to improve the user experience when waiting for a request to be fulfilled. It will show the number pending and the number ready for each pool of instances requested. I still need to add this to the linked google doc spec.", "created": "2020-04-24T16:22:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "I have implemented this capability and written tests for the new functionality added. Additionally, tests have been added for the changes made to vmpooler. I have put up a PR for review, but there are still some additional changes required before this is going to be ready to merge. Specifically, the following specific items need to be addressed.\n * -expiring requests that are not successfully fulfilled within a specified TTL-\n * -ensuring auth data gets tagged on redis vm hash for provisioned instances-\n * -expiring request data so it does not sit in redis forever-\n * -limit of a maximum number of instances that can be requested for one pool-\n * -remove \u2018pool is empty\u2019 messages when pool size is set to 0-\n * -add a capability to delete a request-\n * -add information about ondemand tasks to dashboard-\n * -document the API additions-\n * -document the configuration changes-\n * -add readme documentation to reflect a major additional capability being added to vmpooler-", "created": "2020-05-07T10:40:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "This change is ready for review. The changes have been deployed to vmpooler-dev.k8s.infracore.puppet.net. 
You can view logs for the instance in k8s under the vmpooler namespace.", "created": "2020-05-12T10:15:00.000000"}, {"author": "557058:9479147e-472e-492f-9d5f-2788ee2dd8d0", "body": "Integration testing is under way.\n\n_details_\n*Jenkins*: https://jenkins-cinext.delivery.puppetlabs.net/job/legacy_facter_storage_cinext_pkg-van-ship_cinext/ with RMM pointing to ABS below:\n*ABS*: http://leader.cinext-test.mesos:8080/ui/#/apps/%2Falways-be-scheduling-queue-processor/ with configuration such as:\n{{\"image\": \"artifactory.delivery.puppetlabs.net/qe/always-be-scheduling:dio587-dev3\"}}\n{{ABS_VMPOOLER_API_URL=http://vmpooler-dev.k8s.infracore.puppet.net/api/v1}}\n{{ABS_VMPOOLER_TOKEN=<token>}}\n{{ABS_LOGLEVEL=DEBUG}}\n\nSo the logs are a bit more verbose with ABS_LOGLEVEL=DEBUG\n\nThe POST\n\n{code}\n[2020-05-14 21:03:48.922] DEBUG Query http://vmpooler-dev<fqdn>/api/v1/totalrunning\n[2020-05-14 21:03:48.992] DEBUG POST to http://vmpooler-dev<fqdn>/api/v1/ondemandvm with body {\"redhat-7-x86_64\":1,\"request_id\":\"7b528f8e-0d16-4ed9-a962-986ebcf0d5bd\"}\n[2020-05-14 21:03:49.028] DEBUG Response received: {\"ok\"=>true, \"request_id\"=>\"7b528f8e-0d16-4ed9-a962-986ebcf0d5bd\", \"domain\"=>\"<domain>\"}\n[2020-05-14 21:03:49.030] ERROR QueueProcessor Temporary failed job 7b528f8e-0d16-4ed9-a962-986ebcf0d5bd: Temporary allocation failure: [7b528f8e-0d16-4ed9-a962-986ebcf0d5bd] requested VMs are not ready yet, try again \n{code}\n\nthen a bunch of GET until it's ready\n\n{code}[2020-05-14 21:04:24.518] DEBUG GET from http://vmpooler-dev<fqdn>/api/v1/ondemandvm/7b528f8e-0d16-4ed9-a962-986ebcf0d5bd\n[2020-05-14 21:04:24.588] DEBUG Response received: {\"ok\"=>true, \"request_id\"=>\"7b528f8e-0d16-4ed9-a962-986ebcf0d5bd\", \"ready\"=>false, \"redhat-7-x86_64\"=>{\"ready\"=>\"1\", \"pending\"=>\"0\"}}\n[2020-05-14 21:04:24.590] ERROR QueueProcessor Temporary failed job 7b528f8e-0d16-4ed9-a962-986ebcf0d5bd: Temporary allocation failure: [7b528f8e-0d16-4ed9-a962-986ebcf0d5bd] 
requested VMs are not ready yet, try again redhat-7-x86_64 (ready: 1, pending: 0)\n[2020-05-14 21:04:24.590] INFO QueueProcessor Processed 6 jobs from redis\n[2020-05-14 21:04:29.593] INFO QueueProcessor A new batch processing has started, queue count is: 6\n[2020-05-14 21:04:29.602] INFO QueueProcessor Processing job 7b528f8e-0d16-4ed9-a962-986ebcf0d5bd URL=[https://jenkins-cinext.<domain>/job/<job>]\n[2020-05-14 21:04:29.602] INFO VmpoolerAllocator using credentials\n[2020-05-14 21:04:29.602] INFO Created Vmpooler allocator using API URL [http://vmpooler-dev<fqdn>/api/v1]\n[2020-05-14 21:04:29.602] INFO Created OndemandVmpoolerAllocator using Vmpooler allocator using API URL [http://vmpooler-dev<fqdn>/api/v1]\n[2020-05-14 21:04:29.603] DEBUG Query http://vmpooler-dev<fqdn>/api/v1/totalrunning\n[2020-05-14 21:04:29.634] DEBUG GET from http://vmpooler-dev<fqdn>/api/v1/ondemandvm/7b528f8e-0d16-4ed9-a962-986ebcf0d5bd\n[2020-05-14 21:04:29.668] DEBUG Response received: {\"ok\"=>true, \"request_id\"=>\"7b528f8e-0d16-4ed9-a962-986ebcf0d5bd\", \"ready\"=>true, \"redhat-7-x86_64\"=>{\"hostname\"=>[\"content-shake\"]}, \"domain\"=>\"<domain>\"}\n[2020-05-14 21:04:29.668] DEBUG Query API poolstat info for [\"redhat-7-x86_64\", \"request_id\"]\n[2020-05-14 21:04:29.668] DEBUG Query http://vmpooler-dev<fqdn>/api/v1/poolstat?pool=redhat-7-x86_64,request_id{code}\n\nThe one that's a bit special is\n{code}\n[2020-05-14 21:04:24.588] DEBUG Response received: {\"ok\"=>true, \"request_id\"=>\"7b528f8e-0d16-4ed9-a962-986ebcf0d5bd\", \"ready\"=>false, \"redhat-7-x86_64\"=>{\"ready\"=>\"1\", \"pending\"=>\"0\"}}\n{code}\nbecause it's ready==\"false\" but everything I asked for is ready. 
Don't know if that was just a race scenario, but I wanted to save it somewhere.", "created": "2020-05-14T15:21:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "[~accountid:557058:9479147e-472e-492f-9d5f-2788ee2dd8d0] that is possible because of the way the processing works. I don't believe it's an indicator of an issue, but it is a little strange to see as a user.\n\nThe API inquiry checking request status, and the movement of that request from pending to ready, are separate operations. The evaluation of pending requests happens in pool manager. Because of this, it gets checked about every 5 seconds (in a low usage environment. I believe this is related to sleep with wake events and the default sleep duration.) Since API is only responding to requests I couldn't consider a way to make this work when driven solely by the API. That is what is creating the window of opportunity to see that the status is pending, even though all of your instances are ready.\n\nPerhaps if we move the check for ready requests into the VM ready check it would eliminate this possibility by marking the request ready at the same time the last instance was ready.\n\n[~accountid:557058:9479147e-472e-492f-9d5f-2788ee2dd8d0] can you also test with https for the vmpooler URL?", "created": "2020-05-15T15:29:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "I updated vmpooler to eliminate this window by checking if a request is ready while moving an instance to ready [https://github.com/puppetlabs/vmpooler/pull/375/commits/b831faf5c05c46c3459dfef52b8241e0d0879d86]", "created": "2020-05-21T18:23:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "This capability has been added to vmpooler. 
There is still some follow up which\u00a0 I will ticket and link back to this related to open feedback on the PR for POOLER-158 when it was merged.", "created": "2020-06-10T11:29:00.000000"}], "components": [], "created": "2020-03-05T11:01:00.000000", "creator": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@59ce4ab9"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": ["557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72"]}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzzxwe:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "5.0"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "14/May/20"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": 
"1_*:*_1_*:*_81893883_*|*_3_*:*_1_*:*_3964477445_*|*_10009_*:*_1_*:*_2512140350_*|*_6_*:*_1_*:*_0_*|*_10006_*:*_1_*:*_780682767_*|*_10005_*:*_1_*:*_1039673699"}], "description": "In vmpooler it may be useful if the application could support provisioning on demand. One idea for how we could implement this is upon request a transaction ID would be created that a user could poll for updates to see when your group of SUTs requested is ready. This would provide a convenient way to leverage on-demand mechanisms in vmpooler, without having to re-write all of the machine management logic. A tool like vmfloaty could have functionality added to wait for the request to be fulfilled, so it could be used in a similar way to how it is used now.\n\nFor Puppet, ABS still would provide a way to monitor the max number of running machines, and vmpooler would still provide a way to gain visibility into the machines running now. In this world, we may prefer to offer an additional dashboard view that shows what's running and being requested.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10064", "fixedVersions": [], "id": "10064", "issueType": "Task", "key": "POOLER-158", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "resolution": "Done", "resolutionDate": "2020-06-10T11:29:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Vmpooler should offer a capability to provision on demand", "timeSpent": "PT0S", "updated": "2020-06-10T11:29:00.000000", "votes": "1", "watchers": []}, {"affectedVersions": [], "assignee": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "attachments": [], "comments": 
[{"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "I have put up a PR to add this feature to vmpooler.", "created": "2020-03-04T17:47:00.000000"}], "components": [], "created": "2020-03-04T17:41:00.000000", "creator": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@70330e6b"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|o0090w:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "1.0"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_5525_*|*_10009_*:*_1_*:*_605078472_*|*_6_*:*_1_*:*_0"}], "description": "Vmpooler should offer a mechanism to load configuration values from additional files. 
This would be useful with vault so we can target where the injector mounts the volume to.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10182", "fixedVersions": [], "id": "10182", "issueType": "New Feature", "key": "POOLER-157", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "resolution": "Fixed", "resolutionDate": "2020-03-11T18:46:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Vmpooler should be able to load configuration from additional files", "timeSpent": "PT0S", "updated": "2020-03-11T18:46:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:0b16fa29-44bc-4e11-82b0-a346967ad808", "attachments": [], "comments": [{"author": "557058:0b16fa29-44bc-4e11-82b0-a346967ad808", "body": "Does this have a PR yet?", "created": "2020-03-16T18:03:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "No, I failed to submit the PR. I wrote a test, and it was failing, and I got distracted by redis sentinel instead of finishing this. I will get this wrapped up and the PR submitted.", "created": "2020-03-16T18:38:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "I got this updated and the PR submitted. I updated this to detect the failure, log a message, and recover, rather than simply crash the application. I validated that it does in fact recover and continue to perform work.\n\nVmpooler API already detects this failure automatically, and restarts the API service as needed to restore functionality. 
With this change no operator intervention is required to recover from a redis server failure / restart, assuming the redis server issue is recoverable on its own.", "created": "2020-03-17T12:18:00.000000"}, {"author": "557058:0b16fa29-44bc-4e11-82b0-a346967ad808", "body": "Tested this out and merged it locally with docker-compose. Waiting on a couple of other PRs to get merged before cutting a release and deploying.", "created": "2020-03-17T13:08:00.000000"}, {"author": "557058:0b16fa29-44bc-4e11-82b0-a346967ad808", "body": "Build and deployed to prod in 0.11.1", "created": "2020-03-17T18:05:00.000000"}], "components": ["VM Pooler"], "created": "2020-02-26T14:48:00.000000", "creator": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@1823aee0"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|o003q8:"}, {"fieldName": "Story Points", "fieldType": 
"com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "1.0"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "17/Mar/20"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "10008_*:*_1_*:*_17851818_*|*_1_*:*_1_*:*_13351_*|*_3_*:*_2_*:*_63619597_*|*_10009_*:*_2_*:*_358355995_*|*_6_*:*_1_*:*_0_*|*_10005_*:*_1_*:*_1296355316"}], "description": "When the connection to redis from vmpooler API or manager fails the application could crash. In some cases if the redis connection stops working vmpooler will fail to work properly, but not quit the application. We should ensure that when the redis connection is\u00a0 not available that the\u00a0 application reports an error and quits.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10062", "fixedVersions": [], "id": "10062", "issueType": "Improvement", "key": "POOLER-156", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "resolution": "Done", "resolutionDate": "2020-03-17T18:05:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Vmpooler should crash when redis connection fails", "timeSpent": "PT0S", "updated": "2020-03-17T18:05:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [{"author": "6220db96c4d0fe0069535219", "body": "Migrating issue to a GitHub project at https://github.com/orgs/puppetlabs/projects/64/views/1?pane=issue&itemId=34966883", "created": "2023-08-03T06:47:00.000000"}], "components": [], "created": 
"2020-02-20T12:58:00.000000", "creator": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@353b9378"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|o0002o:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "1.0"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "03/Aug/23"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_2_*:*_29011789287_*|*_6_*:*_1_*:*_0_*|*_10005_*:*_1_*:*_79826377661"}], "description": "When vmpooler is passed a invalid JSON blob it will explode and return a 500 error that does not give any indicator of why it failed. The lone exception is the poolreset endpoint. 
Vmpooler API should return a helpful message indicating it has received invalid JSON, instead of a 500 error.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10111", "fixedVersions": [], "id": "10111", "issueType": "Improvement", "key": "POOLER-155", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "resolution": "Incomplete", "resolutionDate": "2023-08-03T06:47:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "03/Aug/23 6:47 AM", "summary": "Vmpooler API should return a helpful message when provided invalid JSON", "timeSpent": "PT0S", "updated": "2023-08-03T06:47:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:0b16fa29-44bc-4e11-82b0-a346967ad808", "attachments": [], "comments": [{"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "I confirmed in the case of absurd-overcast that the migration failed, it was marked as migrated: true, and the hypervisor information listed the target hypervisor as the host.", "created": "2020-02-19T11:39:00.000000"}, {"author": "557058:f23b0b7f-9386-48b3-bd71-4399766356f4", "body": "Would love to pair with someone on this as I'm still getting to know this code", "created": "2020-03-02T09:13:00.000000"}, {"author": "557058:0b16fa29-44bc-4e11-82b0-a346967ad808", "body": "PR up: https://github.com/puppetlabs/vmpooler/pull/355", "created": "2020-03-03T15:49:00.000000"}, {"author": "557058:0b16fa29-44bc-4e11-82b0-a346967ad808", "body": "Change is merged, now we just need a VMPooler release", "created": "2020-03-03T17:11:00.000000"}, {"author": "557058:0b16fa29-44bc-4e11-82b0-a346967ad808", "body": "VMpooler gem released and 
updated version running on marathon", "created": "2020-03-04T17:20:00.000000"}], "components": [], "created": "2020-02-19T11:15:00.000000", "creator": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@1e8b9ce4"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Method Found", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|i0078v:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "1.0"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "02/Mar/20"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": 
"1_*:*_1_*:*_87100001_*|*_3_*:*_3_*:*_106296168_*|*_10009_*:*_2_*:*_7162526_*|*_6_*:*_1_*:*_0_*|*_10006_*:*_1_*:*_1030982980"}], "description": "When vmpooler determines a machine should be migrated it tags the machine has having been migrated in redis, which in turn causes the vm API to show that the VM was migrated. However, if the migration failed we should mark that it was not migrated, because the migration would have failed. While inspecting this code we should validate that the correct hypervisor data is printed for the SUT.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10089", "fixedVersions": [], "id": "10089", "issueType": "Bug", "key": "POOLER-154", "labels": ["pairing"], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "resolution": "Fixed", "resolutionDate": "2020-03-04T17:20:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "When a machine fails migration its data shows it was migrated", "timeSpent": "PT0S", "updated": "2020-03-04T17:20:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "attachments": [], "comments": [{"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "This has been implemented and deployed to production.", "created": "2020-02-14T16:46:00.000000"}], "components": [], "created": "2020-02-05T11:46:00.000000", "creator": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": 
"com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@224e46ab"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": ["557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72"]}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzzxvz:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "3.0"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_8216_*|*_3_*:*_1_*:*_540328842_*|*_10009_*:*_1_*:*_255131390_*|*_6_*:*_1_*:*_0_*|*_10006_*:*_1_*:*_118255"}], "description": "Vmpooler should provide a mechanism to flush pools. At present, we have to change redis data. 
My intention is to offer an API endpoint like poolreset that will flush pending and ready instances.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10088", "fixedVersions": [], "id": "10088", "issueType": "New Feature", "key": "POOLER-153", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "resolution": "Done", "resolutionDate": "2020-02-14T16:46:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Add endpoint for flushing the VMs of a pool", "timeSpent": "PT0S", "updated": "2020-02-14T16:46:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [{"author": "557058:d266d245-5cba-4a99-842a-d1e061513459", "body": "Please note that the ability to migrate a VM to another host will require a storage migration in addition to the normal compute migration if this is implemented. Please also note that the drive that is in each of our mac's is also where ESXi lives and, thus, running VM's will impact the performance of the hypervisor itself.", "created": "2019-11-01T07:27:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "Do we have capacity in these Mac pros for an additional drive? I think the alternative is biting the bullet and purchasing the 10Gb adapters.\n\nOne additional alternate solution we could consider (though is probably a pain...) is partitioning the disk and putting ESXi and a part of the partition and using the rest for local storage. 
There is no shortage of performance potential in the Mac Pro SSD, but capacity could be an issue depending on which we got.", "created": "2019-11-01T09:44:00.000000"}], "components": [], "created": "2019-10-30T12:36:00.000000", "creator": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@5a65c047"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": ["557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72"]}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzy3sf:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "3.0"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "01/Nov/19"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_8038009156_*|*_6_*:*_1_*:*_0"}], "description": "To use local storage on a hypervisor in vmpooler it would 
be helpful to have a concept that allows for a single pool to be defined that works with the local storage device of the hypervisors in the target cluster. As an example, in our mac cluster we have local storage available per hypervisor. Without any changes, to have four pools configured on each of five hypervisors we would have to specify each pool five times with different local storage devices specified. This would be a fairly ugly addition to the vmpooler configuration file.\n\nIf vmpooler understood that it should leverage local storage devices, and look there for linked clone templates, then we could define the pool once, and on the backend create multiple pools that are available at the pool alias for that platform. So a pool with a pool size of 1 with local storage would create a VM on each hypervisor. We would possibly have to do some trickery with the pools that are displayed to the API so it looks like a unified pool from an API perspective and shows the proper pool size, etc.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10171", "fixedVersions": [], "id": "10171", "issueType": "New Feature", "key": "POOLER-152", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "resolution": "Won't Do", "resolutionDate": "2020-01-31T12:23:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Support concepts for local storage usage", "timeSpent": "PT0S", "updated": "2020-01-31T12:23:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [], "components": [], "created": "2019-10-30T12:17:00.000000", "creator": 
"557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@66689e5b"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": ["557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72"]}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzy3rz:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "1.0"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_9861262705_*|*_6_*:*_1_*:*_0_*|*_10005_*:*_1_*:*_51219292324"}], "description": "Vmpooler should support configuring migrations on a per cluster basis. Currently, it is possible to turn migrations on and off by setting the migration limit. 
We should expand this capability to allow disabling migrations on certain clusters, and specifying the migration limit on a per cluster basis if desired.\n\nThis change will support the usage of local storage on some clusters.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10170", "fixedVersions": [], "id": "10170", "issueType": "New Feature", "key": "POOLER-151", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "resolution": "Won't Do", "resolutionDate": "2021-10-06T11:07:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Migration should be configurable per cluster", "timeSpent": "PT0S", "updated": "2021-10-06T11:07:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "attachments": [], "comments": [{"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "After digging further it looks like the primary mechanism provided for avoiding these issues in Sinatra is a capability to set a parameter called \"lock\", which makes all API queries single threaded. 
This seems to be a pretty extreme solution, and I anticipate could cause problems, in part due to the recent addition of validating that a machine is alive at checkout from the API, because that can take some time, particularly in the case of a failure.\n\nI am going to test further to see if I can resolve this outside of setting \"lock\" in order to avoid the potential performance issues.", "created": "2019-10-14T13:13:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "Setting lock looks to be the right solution here. In testing it appears that performance is still ok, at least when checking locally with a handful of windows. We should roll this to production and see how it goes.", "created": "2019-10-16T09:58:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "I found a solution that lets us synchronize only the checkout portion of API requests, which should minimize the window for any performance issues. A PR is up for this change.", "created": "2019-10-21T16:42:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "This change has been deployed to production in vmpooler 0.7.2.", "created": "2019-10-24T19:33:00.000000"}], "components": ["VM Pooler"], "created": "2019-10-03T16:21:00.000000", "creator": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@6402fc4e"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": 
"com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": ["557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72"]}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Method Found", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzxfyv:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "1.0"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_8177_*|*_10007_*:*_1_*:*_3200_*|*_3_*:*_1_*:*_1556347702_*|*_10009_*:*_1_*:*_269525640_*|*_6_*:*_1_*:*_0"}], "description": "[~accountid:557058:295d7a84-a09b-4348-8961-a1e1764c190e] reported a transient test failure with a VM vanishing suddenly during the test without explanation in\u00a0[https://cinext-jenkinsmaster-sre-prod-1.delivery.puppetlabs.net/view/imaging/view/Windows/view/vCenterBase/job/imaging_win-2019_x86_64_vmware_vcenter.cygwin_run_pa_component_acceptance_tests_packer/32/consoleFull]\u00a0. While investigating the failure I noticed that vmpooler data showed a different job URL, where the same SUT is being used for tests\u00a0[https://cinext-jenkinsmaster-enterprise-prod-1.delivery.puppetlabs.net/job/enterprise_pe-acceptance-tests_integration-system_pe_ha-promoted-replica_nightly_2019.2.x/LAYOUT=redhat7-64mdca-64replica.fa-64.fa,LEGACY_AGENT_VERSION=NONE,PLATFORM=NONE,SCM_BRANCH=2019.2.x,UPGRADE_FROM=NONE,UPGRADE_TO_VERSION=NONE,label=beaker/226/console]\u00a0. 
The failure in the first job and logs about when the SUT was destroyed indicate that the second job URL, which vmpooler shows in its record of the VM, was the reason the machine was torn down.\n\nVmpooler should not have allocated this single SUT twice, so something is not working as expected when the API is retrieving a VM.\n\n\u00a0\n\nKibana log for messages containing SUT name -\u00a0[http://kibana.ops.puppetlabs.net/?#/discover?_g=(refreshInterval:(display:Off,pause:!f,section:0,value:0),time:(from:'2019-10-02T22:23:50.641Z',mode:absolute,to:'2019-10-03T22:23:50.641Z'))&_a=(columns:!(message),index:%5Blogspout-%5DYYYY.MM,interval:auto,query:(query_string:(analyze_wildcard:!t,query:'message:*is9o8abcff60a6a*')),sort:!('@timestamp',desc))|http://kibana.ops.puppetlabs.net/#/discover?_g=(refreshInterval:(display:Off,pause:!f,section:0,value:0),time:(from:'2019-10-02T22:23:50.641Z',mode:absolute,to:'2019-10-03T22:23:50.641Z'))&_a=(columns:!(message),index:%5Blogspout-%5DYYYY.MM,interval:auto,query:(query_string:(analyze_wildcard:!t,query:'message:*is9o8abcff60a6a*')),sort:!('@timestamp',desc))]", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10013", "fixedVersions": [], "id": "10013", "issueType": "Bug", "key": "POOLER-150", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Critical", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "resolution": "Fixed", "resolutionDate": "2019-10-24T19:33:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Vmpooler allocated single SUT to multiple checkout requests", "timeSpent": "PT0S", "updated": "2019-10-24T19:33:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": 
"623e787ea9575800695c094a", "attachments": [], "comments": [{"author": "623e787ea9575800695c094a", "body": "GitHub pull request opened:\n\nhttps://github.com/puppetlabs/vmpooler/pull/338", "created": "2019-08-30T18:05:00.000000"}], "components": ["VM Pooler"], "created": "2019-08-30T17:27:00.000000", "creator": "623e787ea9575800695c094a", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@68968c83"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Method Found", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzwr5j:"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_318232988_*|*_10009_*:*_1_*:*_12972709447_*|*_6_*:*_1_*:*_0"}], "description": "h1. 
Describe the Bug\nIn file `lib/vmpooler/pool_manager.rb`, function _check_ready_vm, after running vmpooler for a while, vmpooler gets a runtime error of `vm['boottime']` being an undefined variable.\n\nh1. Expected Behavior\nvmpooler does not get an undefined variable error in _check_ready_vm.\n\nh1. Steps to Reproduce\nSteps to reproduce the behavior:\n1. Start vmpooler with a new pool and no clients using it.\u00a0 Set a timeout on VMs for a few minutes so that they will expire and get replaced periodically.\n2. Run it empty for a little while so that the TTL of VMs can expire and they will get cycled with new ones.\n3. If I understand correctly, the problem should reproduce itself on its own under these conditions.\u00a0 Maybe this issue is not detected when the pool is consistently under heavy load or if the VM timeout is set large enough to be irrelevant.\n\nh1. Environment\n\u00a0- Version: Should affect any version after pull request #269, e.g. 0.1.0 and newer\n\u00a0- Platform: This should affect any platform, tested vmpooler server on Red Hat 7.6 and Windows 10\n\nh1. 
Additional Context\nNone.\u00a0 The fix for this issue should be trivial and only a few lines of code.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10144", "fixedVersions": [], "id": "10144", "issueType": "Bug", "key": "POOLER-148", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "623e787ea9575800695c094a", "resolution": "Fixed", "resolutionDate": "2020-01-31T12:23:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Refactoring from pull request #269 introduced undefined variable bug", "timeSpent": "PT0S", "updated": "2020-01-31T12:23:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "attachments": [], "comments": [{"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "This work was completed in vmpooler on [https://github.com/puppetlabs/vmpooler/pull/333]\u00a0. I just need to cut a release and deploy the new version. I plan to do this on 8/21.", "created": "2019-08-20T14:22:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "I've deployed this change and flushed centos-6 pools. However, it appears as though vmpooler is still creating linked clones judging by the clone time. I am currently investigating why.", "created": "2019-08-21T16:36:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "I identified and resolved the issue that prevented this feature from working as expected. 
I'm deploying the new version to vmpooler and will update this ticket once the instance has been restarted and centos6 pools have been refreshed.", "created": "2019-08-26T16:25:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "Vmpooler can create full clones now. However, the feature did not work very well, and enabling it created several issues in the vmpooler application. Specifically, every operation interacting with a full clone has a lot more data to work with, and takes many times longer to perform administrative interactions with, such as migrating, or destroying the VM, in addition to the significantly longer clone time. As a result, the theory about full clones being able to improve performance for a specific SUT appears to be off base. It may be possible that an individual SUT may be more performant during a test, however the increase in interaction times for administrative operations make it unworkable. All vmpooler connections become saturated with long lived (several minutes) operations related to the affected pool.\n\nI will discuss with QE team whether we should revert this change on vmpooler.\n\n\n\nI have reverted the application to 0.7.0, which does not create full clones.", "created": "2019-08-27T17:17:00.000000"}], "components": [], "created": "2019-08-20T14:17:00.000000", "creator": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@daf6d79"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People 
Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": ["557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72"]}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzwi07:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "1.0"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_8214_*|*_10009_*:*_1_*:*_1208754243_*|*_5_*:*_1_*:*_0_*|*_10006_*:*_2_*:*_72031589"}], "description": "Vmpooler should be able to make the linked clone functionality optional. 
This would allow for full clones to be used when needed, or to disable linked clones in vmpooler entirely.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10169", "fixedVersions": [], "id": "10169", "issueType": "New Feature", "key": "POOLER-147", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "resolution": "Fixed", "resolutionDate": "2019-09-04T10:03:00.000000", "status": "Resolved", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Allow vmpooler to create full clones", "timeSpent": "PT0S", "updated": "2019-09-04T10:03:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:295d7a84-a09b-4348-8961-a1e1764c190e", "attachments": [], "comments": [{"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "[~accountid:557058:9479147e-472e-492f-9d5f-2788ee2dd8d0] as long as the template will handle the IPv6 connection then we could add the capability to vmpooler to set the VIF at clone time. Then, for IPv6 pools we would only need to add that parameter to the pool configuration, and allow vsphere to handle updating that configuration for the VM before first power on. I think it would be ideal compared to having to manage the issue in the template, but it does rely on a new capability.\nOne of the pyvmomi community samples, [change_vm_vif|https://github.com/vmware/pyvmomi-community-samples/blob/master/samples/change_vm_vif.py]\u00a0shows an example of what we would have to set. 
I have used this before to change the network for a set of templates.", "created": "2019-07-31T13:50:00.000000"}, {"author": "557058:9479147e-472e-492f-9d5f-2788ee2dd8d0", "body": "Added two pools in vmpooler that have images manually set to have a secondary NIC connected to the ipv6 enabled network \u2018ipv6_ds\u2019. They can be grabbed via floaty\n{code}\nredhat-fips-7-x86_64-pixa3-ipv6\nwin-2016-x86_64-pixa3-ipv6\n{code}\nThe redhat fips image will need network scripts setup properly (IMAGING ticket to be opened) but I\u2019ve done it manually\n{code}\nifdown ens192\nmv /etc/sysconfig/network-scripts/ifcfg-eno16777736 /etc/sysconfig/network-scripts/ifcfg-ens192\nsed -i s/eno16777736/ens192/g /etc/sysconfig/network-scripts/ifcfg-ens192\nsed -i s/59c27e81-6cc6-4fcb-88ff-164d3aa1f932/59c27e81-6cc6-4fcb-88ff-164d3aa1f933/g /etc/sysconfig/network-scripts/ifcfg-ens192\nifup ens192\n{code}\n\nThe windows machine seem to have goten it's IPv6 on the secondary NIC properly, but it failed to connect with ping -6 ipv6.google.com without using the -S flag for the source IP/NIC (to be investigated)\n\nI tested successfully:\ntwo vmpooler machines can connect via IPv6 addresses", "created": "2019-08-16T15:12:00.000000"}, {"author": "557058:295d7a84-a09b-4348-8961-a1e1764c190e", "body": "This ticket only relates to the FIPS images at the moment, so the actual action is to update the new FIPS images (linux ones in particular) to work on IPV6. 
See IMAGES-1145 which [~accountid:70121:4596f954-1fdf-472f-a700-6b7c2ffbc525] is working on at the moment.", "created": "2019-09-26T09:55:00.000000"}, {"author": "557058:295d7a84-a09b-4348-8961-a1e1764c190e", "body": "Pools for IPV6 are up and running:\n* {{win-2016-x86_64-ipv6}} \n* {{redhat-fips-7-x86_64-ipv6}}\n\nThanks [~accountid:70121:4596f954-1fdf-472f-a700-6b7c2ffbc525] for doing the actual legwork on these.", "created": "2019-10-02T10:54:00.000000"}, {"author": "557058:9479147e-472e-492f-9d5f-2788ee2dd8d0", "body": "I'm good with that. Thanks", "created": "2019-10-02T11:09:00.000000"}], "components": [], "created": "2019-07-31T12:48:00.000000", "creator": "557058:9479147e-472e-492f-9d5f-2788ee2dd8d0", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@710838d8"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzwiwf:"}, {"fieldName": "Story Points", "fieldType": 
"com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "5.0"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "31/Jul/19"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_5433764744_*|*_3_*:*_1_*:*_2632458_*|*_10009_*:*_1_*:*_898205_*|*_6_*:*_1_*:*_0"}], "description": "Currently waiting on infracore to setup the new VLAN network in vmware.\n\nInitially we would setup a pool for redhat-fips that has IPv6 enabled in its vmware network. \n\nWe would need to take the image template, add a new NIC connected to the vcenter network called 'ipv6_ds' with IPv6 enabled on it. There might or might not be new network-scripts to create to enable ipv6.\n\nShould we use an imaging pipeline to produce those images? [~accountid:5abd613dd4cf3c56be24b70d]\n\n", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10104", "fixedVersions": [], "id": "10104", "issueType": "Improvement", "key": "POOLER-146", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:9479147e-472e-492f-9d5f-2788ee2dd8d0", "resolution": "Fixed", "resolutionDate": "2019-10-02T11:09:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Setup IPv6 vmpooler pools", "timeSpent": "PT0S", "updated": "2019-10-02T11:09:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "5abd613dd4cf3c56be24b70d", "attachments": [], "comments": [{"author": "5abd613dd4cf3c56be24b70d", "body": "seems like the remote for scientific linux is broken. 
https://artifactory.delivery.puppetlabs.net/artifactory/rpm__remote_scientific/6/x86_64/os/Packages/ only contains a few packages.\n\n/cc [~accountid:557058:55b23daa-f1aa-41fd-b10d-bf49a61932da],  [~accountid:557058:ab1874a9-45ab-4efc-91aa-5200c165b2c4] \n\nthe vmpooler image _localmirror-os.repo_ points to baseurl=https://artifactory.delivery.puppetlabs.net/artifactory/rpm__remote_scientific/6/x86_64/os", "created": "2019-07-29T07:56:00.000000"}, {"author": "557058:9e548bcc-dbc3-4c04-a047-0344921dbb97", "body": "[~accountid:5abd613dd4cf3c56be24b70d]\u00a0when testing the artifactory connection to the mirror url [https://mirror.lstn.net/scientific/]\u00a0artifactory is giving me a 403 forbidden error. Is this something we need to get permissions for? Or is there a different mirror we can use?", "created": "2019-07-29T10:40:00.000000"}, {"author": "557058:9e548bcc-dbc3-4c04-a047-0344921dbb97", "body": "I found\u00a0-[http://ftp1.scientificlinux.org/linux/scientific/6/x86_64/os/Packages/]-\u00a0that we can use\n\nEdit: using\u00a0[http://mirror.grid.uchicago.edu/pub/linux/scientific/]\u00a0instead", "created": "2019-07-29T11:08:00.000000"}, {"author": "557058:9e548bcc-dbc3-4c04-a047-0344921dbb97", "body": "Updated the mirror to the one above, let me know if you run into any more issues", "created": "2019-07-29T11:12:00.000000"}, {"author": "557058:291cd4a1-794b-4442-93e6-929db39e48f7", "body": "Progress, a different error though\n{code:java}\n\u279c  puppetlabs-apache git:(FM-8214) sshvm gjgad1zg7cysfqa.delivery.puppetlabs.net\nThe authenticity of host 'gjgad1zg7cysfqa.delivery.puppetlabs.net (10.32.118.255)' can't be established.\nRSA key fingerprint is SHA256:KdDsd8+UwiDO/WUnPMyapvianLpgorLiyMwtzO0D6uw.\nAre you sure you want to continue connecting (yes/no)? 
yes\nWarning: Permanently added 'gjgad1zg7cysfqa.delivery.puppetlabs.net,10.32.118.255' (RSA) to the list of known hosts.\nLast login: Fri Mar 23 19:59:17 2018 from 172.16.119.1\n[root@gjgad1zg7cysfqa ~]# yum install mysql-server\nLoaded plugins: security\nSetting up Install Process\nlocalmirror-os                                                                                                | 3.7 kB     00:00\nlocalmirror-os/primary_db                                                                                     | 4.5 MB     00:00\nlocalmirror-updates-fastbugs                                                                                  | 2.9 kB     00:00\nlocalmirror-updates-fastbugs/primary_db                                                                       |  22 kB     00:00\nlocalmirror-updates-security                                                                                  | 2.9 kB     00:00\nlocalmirror-updates-security/primary_db                                                                       | 1.2 MB     00:00\nResolving Dependencies\n--> Running transaction check\n---> Package mysql-server.x86_64 0:5.1.73-8.el6_8 will be installed\n--> Processing Dependency: mysql = 5.1.73-8.el6_8 for package: mysql-server-5.1.73-8.el6_8.x86_64\n--> Processing Dependency: perl-DBI for package: mysql-server-5.1.73-8.el6_8.x86_64\n--> Processing Dependency: perl-DBD-MySQL for package: mysql-server-5.1.73-8.el6_8.x86_64\n--> Processing Dependency: perl(DBI) for package: mysql-server-5.1.73-8.el6_8.x86_64\n--> Running transaction check\n---> Package mysql.x86_64 0:5.1.73-8.el6_8 will be installed\n--> Processing Dependency: mysql-libs = 5.1.73-8.el6_8 for package: mysql-5.1.73-8.el6_8.x86_64\n---> Package perl-DBD-MySQL.x86_64 0:4.013-3.el6 will be installed\n---> Package perl-DBI.x86_64 0:1.609-4.el6 will be installed\n--> Running transaction check\n---> Package mysql-libs.x86_64 0:5.1.73-7.el6 will be updated\n---> Package mysql-libs.x86_64 
0:5.1.73-8.el6_8 will be an update\n--> Finished Dependency ResolutionDependencies Resolved=====================================================================================================================================\n Package                          Arch                     Version                            Repository                        Size\n=====================================================================================================================================\nInstalling:\n mysql-server                     x86_64                   5.1.73-8.el6_8                     localmirror-os                   8.6 M\nInstalling for dependencies:\n mysql                            x86_64                   5.1.73-8.el6_8                     localmirror-os                   894 k\n perl-DBD-MySQL                   x86_64                   4.013-3.el6                        localmirror-os                   133 k\n perl-DBI                         x86_64                   1.609-4.el6                        localmirror-os                   704 k\nUpdating for dependencies:\n mysql-libs                       x86_64                   5.1.73-8.el6_8                     localmirror-os                   1.2 MTransaction Summary\n=====================================================================================================================================\nInstall       4 Package(s)\nUpgrade       1 Package(s)Total download size: 12 M\nIs this ok [y/N]: y\nDownloading Packages:\n(1/5): mysql-5.1.73-8.el6_8.x86_64.rpm                                                                        | 894 kB     00:00\nhttps://artifactory.delivery.puppetlabs.net/artifactory/rpm__remote_scientific/6/x86_64/os/Packages/mysql-libs-5.1.73-8.el6_8.x86_64.rpm: [Errno 14] PYCURL ERROR 22 - \"The requested URL returned error: 404 Not Found\"\nTrying other mirror.\nTo address this issue please refer to the below knowledge base article 
https://access.redhat.com/articles/1320623If above article doesn't help to resolve this issue please open a ticket with Red Hat Support.https://artifactory.delivery.puppetlabs.net/artifactory/rpm__remote_scientific/6/x86_64/os/Packages/mysql-server-5.1.73-8.el6_8.x86_64.rpm: [Errno 14] PYCURL ERROR 22 - \"The requested URL returned error: 404 Not Found\"\nTrying other mirror.\nhttps://artifactory.delivery.puppetlabs.net/artifactory/rpm__remote_scientific/6/x86_64/os/Packages/perl-DBD-MySQL-4.013-3.el6.x86_64.rpm: [Errno 14] PYCURL ERROR 22 - \"The requested URL returned error: 404 Not Found\"\nTrying other mirror.\nhttps://artifactory.delivery.puppetlabs.net/artifactory/rpm__remote_scientific/6/x86_64/os/Packages/perl-DBI-1.609-4.el6.x86_64.rpm: [Errno 14] PYCURL ERROR 22 - \"The requested URL returned error: 404 Not Found\"\nTrying other mirror.\nError Downloading Packages:\n  perl-DBI-1.609-4.el6.x86_64: failure: Packages/perl-DBI-1.609-4.el6.x86_64.rpm from localmirror-os: [Errno 256] No more mirrors to try.\n  perl-DBD-MySQL-4.013-3.el6.x86_64: failure: Packages/perl-DBD-MySQL-4.013-3.el6.x86_64.rpm from localmirror-os: [Errno 256] No more mirrors to try.\n  mysql-libs-5.1.73-8.el6_8.x86_64: failure: Packages/mysql-libs-5.1.73-8.el6_8.x86_64.rpm from localmirror-os: [Errno 256] No more mirrors to try.\n  mysql-server-5.1.73-8.el6_8.x86_64: failure: Packages/mysql-server-5.1.73-8.el6_8.x86_64.rpm from localmirror-os: [Errno 256] No more mirrors to try.[root@gjgad1zg7cysfqa ~]# {code}", "created": "2019-07-30T03:24:00.000000"}, {"author": "557058:9e548bcc-dbc3-4c04-a047-0344921dbb97", "body": "Hmm that's odd, those packages weren't doing that yesterday. I changed the mirror once more to help determine the cause. Currently all of those packages exist on artifactory and are not giving me a 404. I'll keep an eye on things today and see if I can pinpoint what's going on. 
Thank you for your patience!", "created": "2019-07-30T09:00:00.000000"}, {"author": "557058:291cd4a1-794b-4442-93e6-929db39e48f7", "body": "Thanks [~accountid:557058:9e548bcc-dbc3-4c04-a047-0344921dbb97] and [~accountid:5abd613dd4cf3c56be24b70d]. All seems good now !\u00a0 Closing the ticket", "created": "2019-07-31T04:02:00.000000"}], "components": ["VM Pooler"], "created": "2019-07-29T07:46:00.000000", "creator": "557058:291cd4a1-794b-4442-93e6-929db39e48f7", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@46542c21"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzvzg7:"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "29/Jul/19"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_12529073_*|*_5_*:*_1_*:*_0_*|*_10011_*:*_1_*:*_58194482_*|*_10006_*:*_1_*:*_88656808"}], 
"description": "Steps to reproduce, provision a machine, try to install mysql\n{code:java}\n\u279c  puppetlabs-mysql git:(FM-7709) sshvm js7k742xkv2zi24.delivery.puppetlabs.net\nThe authenticity of host 'js7k742xkv2zi24.delivery.puppetlabs.net (10.32.115.151)' can't be established.\nRSA key fingerprint is SHA256:KdDsd8+UwiDO/WUnPMyapvianLpgorLiyMwtzO0D6uw.\nAre you sure you want to continue connecting (yes/no)? yes\nWarning: Permanently added 'js7k742xkv2zi24.delivery.puppetlabs.net,10.32.115.151' (RSA) to the list of known hosts.\nLast login: Fri Mar 23 19:59:17 2018 from 172.16.119.1\n[root@js7k742xkv2zi24 ~]# yum install mysql-server\nLoaded plugins: security\nSetting up Install Process\nlocalmirror-os                                                                                                | 3.7 kB     00:00\nlocalmirror-os/primary_db                                                                                     | 4.5 MB     00:00\nlocalmirror-updates-fastbugs                                                                                  | 2.9 kB     00:00\nlocalmirror-updates-fastbugs/primary_db                                                                       |  22 kB     00:00\nlocalmirror-updates-security                                                                                  | 2.9 kB     00:00\nlocalmirror-updates-security/primary_db                                                                       | 1.2 MB     00:00\nResolving Dependencies\n--> Running transaction check\n---> Package mysql-server.x86_64 0:5.1.73-8.el6_8 will be installed\n--> Processing Dependency: mysql = 5.1.73-8.el6_8 for package: mysql-server-5.1.73-8.el6_8.x86_64\n--> Processing Dependency: perl-DBI for package: mysql-server-5.1.73-8.el6_8.x86_64\n--> Processing Dependency: perl-DBD-MySQL for package: mysql-server-5.1.73-8.el6_8.x86_64\n--> Processing Dependency: perl(DBI) for package: mysql-server-5.1.73-8.el6_8.x86_64\n--> Running transaction 
check\n---> Package mysql.x86_64 0:5.1.73-8.el6_8 will be installed\n--> Processing Dependency: mysql-libs = 5.1.73-8.el6_8 for package: mysql-5.1.73-8.el6_8.x86_64\n---> Package perl-DBD-MySQL.x86_64 0:4.013-3.el6 will be installed\n---> Package perl-DBI.x86_64 0:1.609-4.el6 will be installed\n--> Running transaction check\n---> Package mysql-libs.x86_64 0:5.1.73-7.el6 will be updated\n---> Package mysql-libs.x86_64 0:5.1.73-8.el6_8 will be an update\n--> Finished Dependency ResolutionDependencies Resolved=====================================================================================================================================\n Package                          Arch                     Version                            Repository                        Size\n=====================================================================================================================================\nInstalling:\n mysql-server                     x86_64                   5.1.73-8.el6_8                     localmirror-os                   8.6 M\nInstalling for dependencies:\n mysql                            x86_64                   5.1.73-8.el6_8                     localmirror-os                   894 k\n perl-DBD-MySQL                   x86_64                   4.013-3.el6                        localmirror-os                   133 k\n perl-DBI                         x86_64                   1.609-4.el6                        localmirror-os                   704 k\nUpdating for dependencies:\n mysql-libs                       x86_64                   5.1.73-8.el6_8                     localmirror-os                   1.2 MTransaction Summary\n=====================================================================================================================================\nInstall       4 Package(s)\nUpgrade       1 Package(s)Total download size: 12 M\nIs this ok [y/N]: y\nDownloading 
Packages:\nhttps://artifactory.delivery.puppetlabs.net/artifactory/rpm__remote_scientific/6/x86_64/os/Packages/mysql-5.1.73-8.el6_8.x86_64.rpm: [Errno 14] PYCURL ERROR 22 - \"The requested URL returned error: 404 Not Found\"\nTrying other mirror.\nTo address this issue please refer to the below knowledge base articlehttps://access.redhat.com/articles/1320623If above article doesn't help to resolve this issue please open a ticket with Red Hat Support.https://artifactory.delivery.puppetlabs.net/artifactory/rpm__remote_scientific/6/x86_64/os/Packages/mysql-libs-5.1.73-8.el6_8.x86_64.rpm: [Errno 14] PYCURL ERROR 22 - \"The requested URL returned error: 404 Not Found\"\nTrying other mirror.\nhttps://artifactory.delivery.puppetlabs.net/artifactory/rpm__remote_scientific/6/x86_64/os/Packages/mysql-server-5.1.73-8.el6_8.x86_64.rpm: [Errno 14] PYCURL ERROR 22 - \"The requested URL returned error: 404 Not Found\"\nTrying other mirror.\nhttps://artifactory.delivery.puppetlabs.net/artifactory/rpm__remote_scientific/6/x86_64/os/Packages/perl-DBD-MySQL-4.013-3.el6.x86_64.rpm: [Errno 14] PYCURL ERROR 22 - \"The requested URL returned error: 404 Not Found\"\nTrying other mirror.\nhttps://artifactory.delivery.puppetlabs.net/artifactory/rpm__remote_scientific/6/x86_64/os/Packages/perl-DBI-1.609-4.el6.x86_64.rpm: [Errno 14] PYCURL ERROR 22 - \"The requested URL returned error: 404 Not Found\"\nTrying other mirror.\nError Downloading Packages:\n  perl-DBI-1.609-4.el6.x86_64: failure: Packages/perl-DBI-1.609-4.el6.x86_64.rpm from localmirror-os: [Errno 256] No more mirrors to try.\n  perl-DBD-MySQL-4.013-3.el6.x86_64: failure: Packages/perl-DBD-MySQL-4.013-3.el6.x86_64.rpm from localmirror-os: [Errno 256] No more mirrors to try.\n  mysql-5.1.73-8.el6_8.x86_64: failure: Packages/mysql-5.1.73-8.el6_8.x86_64.rpm from localmirror-os: [Errno 256] No more mirrors to try.\n  mysql-server-5.1.73-8.el6_8.x86_64: failure: Packages/mysql-server-5.1.73-8.el6_8.x86_64.rpm from localmirror-os: 
[Errno 256] No more mirrors to try.\n  mysql-libs-5.1.73-8.el6_8.x86_64: failure: Packages/mysql-libs-5.1.73-8.el6_8.x86_64.rpm from localmirror-os: [Errno 256] No more mirrors to try.[root@js7k742xkv2zi24 ~]# {code}", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10168", "fixedVersions": [], "id": "10168", "issueType": "Task", "key": "POOLER-145", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:291cd4a1-794b-4442-93e6-929db39e48f7", "resolution": "Fixed", "resolutionDate": "2019-07-31T04:02:00.000000", "status": "Resolved", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "scientific linux 6, mirror list is not working", "timeSpent": "PT0S", "updated": "2019-07-31T04:02:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "attachments": [], "comments": [{"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "Rather than using guestinfo I think we should add this data to the vm API endpoint.", "created": "2019-08-20T13:36:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "This change has been deployed. You can now see what host a VM is running on. 
If it was migrated it will be reported in the VM data.\n{code:java}\ncurl vmpooler.delivery.puppetlabs.net/api/v1/vm/dfvivzor8bc9kpi\n{\n  \"ok\": true,\n  \"dfvivzor8bc9kpi\": {\n    \"template\": \"redhat-7-x86_64-pixa3\",\n    \"lifetime\": 12,\n    \"running\": 0.09,\n    \"remaining\": 11.91,\n    \"start_time\": \"2019-08-21T22:29:05+00:00\",\n    \"end_time\": \"2019-08-22T10:29:05+00:00\",\n    \"state\": \"running\",\n    \"tags\": {\n      \"jenkins_build_url\": \"https://cinext-jenkinsmaster-enterprise-prod-1.delivery.puppetlabs.net/job/enterprise_pe-puppet-server-extensions_integration-system_smoke-lovejoy/LAYOUT=redhat7-64mdca,LDAP_TYPE=default,SAUCE=default,UNEEDED=UNEEDED,label=beaker/35/\",\n      \"created_by\": \"run-me-maybe\"\n    },\n    \"ip\": \"10.16.115.62\",\n    \"domain\": \"delivery.puppetlabs.net\",\n    \"host\": \"pix-jj27-c1-7.ops.puppetlabs.net\"\n  } {code}", "created": "2019-08-21T16:37:00.000000"}], "components": ["VM Pooler"], "created": "2019-06-10T12:06:00.000000", "creator": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@43a532c1"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": ["557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72"]}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": 
"com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hykf8n:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "1.0"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_167387766_*|*_3_*:*_1_*:*_22792888_*|*_6_*:*_1_*:*_0_*|*_10006_*:*_1_*:*_6046887661"}], "description": "When running tests on a SUT it would be helpful to be able to identify, from the VM, what its parent host is. This ticket serves to track implementing this capability so a VM could retrieve its host from VM tools within the SUT.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10110", "fixedVersions": [], "id": "10110", "issueType": "New Feature", "key": "POOLER-142", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "resolution": "Done", "resolutionDate": "2019-08-21T16:37:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Set hypervisor host data on VM in vmpooler so it may be retrieved from a SUT", "timeSpent": "PT0S", "updated": "2019-08-21T16:37:00.000000", "votes": "1", "watchers": []}, {"affectedVersions": [], "assignee": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "attachments": [], "comments": [], "components": ["VM Pooler"], "created": "2019-04-11T13:24:00.000000", "creator": 
"557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@7d6f57b5"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": ["557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72"]}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hztz87:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "1.0"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_7155_*|*_10007_*:*_1_*:*_6966542_*|*_3_*:*_1_*:*_243894_*|*_10009_*:*_1_*:*_506358037_*|*_6_*:*_1_*:*_0"}], "description": "vmpooler evaluates VMs for migration in a last in, first out order. This happens because the list received from smembers in redis is iterated over to perform migrations for VMs waiting, and we add items to this set in redis with `sadd`, which adds the member to the beginning of the set. 
We should reverse the list before iterating over it in order to process the oldest members of the list first.\n\nThis issue also affects pending state evaluation.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10103", "fixedVersions": [], "id": "10103", "issueType": "Improvement", "key": "POOLER-141", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "resolution": "Fixed", "resolutionDate": "2019-04-17T12:04:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Migrations and pending checks are processed LIFO processed", "timeSpent": "PT0S", "updated": "2019-04-17T12:04:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "attachments": [], "comments": [{"author": "557058:6713b848-54a4-4f2e-9636-a24860d9c3f2", "body": "Hey Matt,\n\nCouple of thoughts:\n\n* Did you want to just turn down the ready_ttl?\nhttps://github.com/puppetlabs/vmpooler/blob/master/lib/vmpooler/pool_manager.rb#L1062\n\n* Right now you can't synchronously call a ready check on checkout out because the handout is on the API side and the checks are on the Pool Manager.  Making this synchronous could be problematic and cause longer checkout times.\n\n", "created": "2019-04-02T20:54:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "I was considering duplicating the `vm_ready?` check and running that from the API on checkout. 
I expect there are sufficient mechanisms to catch these before handing out the VMs already, and I see those catching several VMs that aren't alive any longer, but there are multiple reports of VMs being handed out each day that don't exist any longer, so I was considering whether the few milliseconds to validate the machine is up would be worthwhile during checkout if it serves as a way to help eliminate this failure.\n\nLooking at our configuration, we're not actually setting a ready_ttl, which is resulting it being set at 0 if I'm reading this correctly. Maybe that's a part of our problem.\n\nhttps://github.com/puppetlabs/vmpooler/blob/25731194e315ec6a18253507196bd9cbd2f10491/lib/vmpooler/pool_manager.rb#L929", "created": "2019-04-02T22:22:00.000000"}, {"author": "557058:6713b848-54a4-4f2e-9636-a24860d9c3f2", "body": "Oh Whoops...\nThat settings defines how long a VM will stay in ready until it's destroyed\n\nhttps://github.com/puppetlabs/vmpooler/blob/25731194e315ec6a18253507196bd9cbd2f10491/lib/vmpooler/pool_manager.rb#L163\n\n", "created": "2019-04-02T23:40:00.000000"}, {"author": "557058:6713b848-54a4-4f2e-9636-a24860d9c3f2", "body": "It's this line\n\nhttps://github.com/puppetlabs/vmpooler/blob/25731194e315ec6a18253507196bd9cbd2f10491/lib/vmpooler/pool_manager.rb#L154\n\nIt'll be the per pool setting {{vm_checktime}}\n\n\n", "created": "2019-04-02T23:44:00.000000"}, {"author": "557058:6713b848-54a4-4f2e-9636-a24860d9c3f2", "body": "Ha... I should really read my own documentation first...\n\n\nhttps://github.com/puppetlabs/vmpooler/blob/310dc7cbc98d241fa7c3ddd3bcf20311a728ea88/vmpooler.yaml.example#L377-L379", "created": "2019-04-02T23:47:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "Looking at this more closely, when we check out a VM it is removed from the ready queue and then added to the running queue. 
If the loop for the pool runs and catches the VM between queues then it will \"discover\" it and and then destroy it, which is a part of what appears to be happening when this issue arises. Before implementing a check like this I'm going to change how we check out the VM and leverage `smove` in redis, instead of `spop` and then `sadd` in order to eliminate this window. It should be apparent within a few days if this was the issue or not once the change is deployed.", "created": "2019-04-05T14:49:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "The change described in the last comment has finally been deployed.", "created": "2019-04-11T12:59:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "QENG-7324 reports issues that look similar to this. As a result I've reopened this issue and will try implementing the 'vm_ready?' check at checkout time.", "created": "2019-07-09T10:23:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "This change has been merged on vmpooler.", "created": "2019-07-17T16:31:00.000000"}], "components": ["VM Pooler"], "created": "2019-04-02T20:09:00.000000", "creator": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@5a0a1992"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": 
"com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hztp33:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "3.0"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "03/Apr/19"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_83421_*|*_10007_*:*_1_*:*_173019108_*|*_3_*:*_2_*:*_1058882425_*|*_10009_*:*_1_*:*_518988173_*|*_4_*:*_1_*:*_53160_*|*_5_*:*_1_*:*_7172093814_*|*_6_*:*_1_*:*_0_*|*_10006_*:*_1_*:*_169677358_*|*_10005_*:*_1_*:*_52557286"}], "description": "On occasion vmpooler will return a host that is not reachable. When ABS provides this host to the test it causes the test to attempt to connect to the VM that is no longer alive, retry, and continuously fail until interrupted, or eventually the test fails. Vmpooler discovers the machine is no longer reachable just after checkout, but some reason on occasion is not making this determination until it has been handed out.\n\nReviewing logs vmpooler is identifying machines that fail on a regular basis and removing them, but clearly some are being handed out that are not alive any longer. 
Validating the machine is working at checkout should resolve this.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10012", "fixedVersions": [], "id": "10012", "issueType": "Improvement", "key": "POOLER-140", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "resolution": "Done", "resolutionDate": "2019-07-17T16:31:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Validate VM is still reachable before handing out", "timeSpent": "PT0S", "updated": "2019-07-17T16:31:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:5d2971d3-3eb2-4f05-ab63-8a307eabd9c4", "attachments": [], "comments": [{"author": "557058:5d2971d3-3eb2-4f05-ab63-8a307eabd9c4", "body": "[~accountid:5b8813dc7a33352bf10f680c] [~accountid:5abd613dd4cf3c56be24b70d] This has been deployed to the dev instance of vmpooler.\n\nIt will be in production during the maint. 
window this Wednesday.", "created": "2019-07-29T11:43:00.000000"}], "components": [], "created": "2019-03-13T03:24:00.000000", "creator": "5b8813dc7a33352bf10f680c", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@2c973317"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzve7r:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "3.0"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "29/Jul/19"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_11360363965_*|*_10007_*:*_1_*:*_6545171_*|*_3_*:*_1_*:*_412658031_*|*_10009_*:*_1_*:*_623372239_*|*_5_*:*_1_*:*_0_*|*_10006_*:*_1_*:*_17805439_*|*_10005_*:*_1_*:*_155819796"}], "description": "At the moment vmpooler api does not allow to update via HTTP requests the 
configuration for a dev-pool with the option for cluster.\n\nModifying the api to allow also the cluster setting to be changed would be great for the imaging pipeline because we don't have to use two dev-pools (one for mac os and one for the windows/linux)", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10011", "fixedVersions": [], "id": "10011", "issueType": "Story", "key": "POOLER-143", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "5b8813dc7a33352bf10f680c", "resolution": "Fixed", "resolutionDate": "2019-08-05T16:53:00.000000", "status": "Resolved", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Modify vmpooler api to allow cluster option to be saved", "timeSpent": "PT0S", "updated": "2019-08-05T16:53:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "attachments": [], "comments": [{"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "I've put up a PR for review to resolve this.", "created": "2019-02-13T17:45:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "This change has been deployed.", "created": "2019-02-14T17:50:00.000000"}], "components": [], "created": "2019-02-13T16:34:00.000000", "creator": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@588ac4bf"}, {"fieldName": "Epic/Theme", "fieldType": 
"com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Method Found", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzsscf:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "3.0"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_319134_*|*_10007_*:*_1_*:*_86730615_*|*_3_*:*_1_*:*_3935455_*|*_10009_*:*_1_*:*_2659_*|*_6_*:*_1_*:*_0"}], "description": "VMpooler SUT allocation runs in the API, while pool management runs via pool_manager. When a system is allocated a method is called, 'account_for_starting_vm'. Before this is called we iterate over each requested VM and try to retrieve it. Only once each resource has been retrieved do we add the VM back to the running queue. 
We should move the VM to the running queue as soon as it is allocated to ensure that it can not be discovered by pool_manager during the time it takes to allocate all VMs requested.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10109", "fixedVersions": [], "id": "10109", "issueType": "Bug", "key": "POOLER-139", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "resolution": "Fixed", "resolutionDate": "2019-02-14T17:50:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "VM can be discovered and destroyed when requesting multiple machines", "timeSpent": "PT0S", "updated": "2019-02-14T17:50:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "attachments": [], "comments": [{"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "I've put up a PR for this change. 
While testing it I removed the redis-server testing requirement for vmpooler.", "created": "2019-01-30T16:51:00.000000"}], "components": ["VM Pooler"], "created": "2019-01-17T14:02:00.000000", "creator": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@422b3da9"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzs9hj:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "3.0"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_1133322520_*|*_10007_*:*_1_*:*_174669111_*|*_5_*:*_1_*:*_0"}], "description": "Vmpooler sets a single pool as an alias when aliases are configured. If more than one pool is configured to use an alias in a configuration file then the last pool is used. 
Vmpooler should support using multiple pools for a single alias.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10010", "fixedVersions": [], "id": "10010", "issueType": "Improvement", "key": "POOLER-138", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "resolution": "Fixed", "resolutionDate": "2019-02-01T17:22:00.000000", "status": "Resolved", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Vmpooler should support multiple pools for an alias", "timeSpent": "PT0S", "updated": "2019-02-01T17:22:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "attachments": [], "comments": [{"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "I've put up a PR that fixes this for migration_limit as well as other environment variables available for configuration. I've also added tests to validate that things now work as expected.", "created": "2019-01-17T11:45:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "This change has been merged. 
It will get deployed along with the next release.", "created": "2019-01-30T16:51:00.000000"}], "components": [], "created": "2018-12-17T11:06:00.000000", "creator": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@4205efc6"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Method Found", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzrqs7:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "3.0"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_2182138307_*|*_10007_*:*_1_*:*_1061942817_*|*_3_*:*_1_*:*_498575816_*|*_10009_*:*_1_*:*_79609145_*|*_6_*:*_1_*:*_0"}], "description": "When configuring the migration_limit value via the MIGRATION_LIMIT environment variable the 
value is interpreted as a string. A string value causes migrations to be disabled. Vmpooler should convert a string value to an integer where required for the migration_limit setting so setting this parameter does not break migrations. While investigating this, we should scan to see if other parameters would behave the same way when set via environment variable and, if so, fix them, so this mechanism for configuring vmpooler works as expected.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10108", "fixedVersions": [], "id": "10108", "issueType": "Bug", "key": "POOLER-137", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "resolution": "Fixed", "resolutionDate": "2019-01-30T16:51:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Setting migration_limit to string disables migrations at checkout", "timeSpent": "PT0S", "updated": "2019-01-30T16:51:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [{"author": "557058:79f2fdd6-baee-43c4-91af-c7e647173c95", "body": "Closing out this ticket, feel free to reopen if this is still a priority. 
", "created": "2020-02-20T13:07:00.000000"}], "components": [], "created": "2018-11-26T11:11:00.000000", "creator": "557058:68216498-c6bf-4e8f-92c1-ac84887b7c02", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@1a4c404d"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzrcgn:"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "20/Feb/20"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_38973350427_*|*_6_*:*_1_*:*_0"}], "description": "To allow vmpooler to run unmodified third-party appliances natively (instead of having to modify the images, or require nested virtualisation) vmpooler should be able to handle images that do not send their hostname as part of the initial dhcp handshake.\n\ncc [~accountid:557058:b5baf531-c3a9-49d0-b512-74aaa6e4335d]", "epicLinkSummary": 
null, "estimate": "PT0S", "externalId": "10087", "fixedVersions": [], "id": "10087", "issueType": "Improvement", "key": "POOLER-136", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:68216498-c6bf-4e8f-92c1-ac84887b7c02", "resolution": "Won't Do", "resolutionDate": "2020-02-20T13:07:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "vmpooler hostname handling", "timeSpent": "PT0S", "updated": "2020-02-20T13:07:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [{"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "Vmpooler should not hand out half-booted images. However, the check that vmpooler performs is pretty rudimentary. It only verifies that it can open a socket to the VM at the target address. In the case of the Cisco platform it's running a nested KVM hypervisor in order to actually provide the Cisco OS. I think it's likely that in the case you saw there was an issue with this process, even though the host is reachable at its DNS name.\n\nI think if we were to implement an additional check that should happen in the first-boot script for the host, rather than from vmpooler, since vmpooler doesn't perform in depth inspections of SUTs.\n\nCan you tell me more about what you encountered when you got a machine that did not seem to have completed its bootstrapping?", "created": "2018-11-26T11:42:00.000000"}, {"author": "557058:68216498-c6bf-4e8f-92c1-ac84887b7c02", "body": "bq. Vmpooler should not hand out half-booted images.\n\n100% agree.\n\nbq. However, the check that vmpooler performs is pretty rudimentary. 
It only verifies that it can open a socket to the VM at the target address.\n\na socket to what?\n\nbq. In the case of the Cisco platform it's running a nested KVM hypervisor in order to actually provide the Cisco OS. I think it's likely that in the case you saw there was an issue with this process, even though the host is reachable at its DNS name.\n\nThat, too, is my current assumption.\n\nbq. I think if we were to implement an additional check that should happen in the first-boot script for the host, rather than from vmpooler, since vmpooler doesn't perform in depth inspections of SUTs.\n\nThis is directly opposed to what I believe where vmpooler should be going. See POOLER-136 for additional context. The nested KVM solution is a crude work-around that is adding complexity, and severely hampering our ability to add new images to vmpooler (see e.g. PA-2283).\n\nbq. Can you tell me more about what you encountered when you got a machine that did not seem to have completed its bootstrapping?\n\nThe test trying to connect to the machine keels over:\n\n{code}\n \u001b[00", "created": "2018-11-28T04:37:00.000000"}, {"author": "6220db96c4d0fe0069535219", "body": "Migrating issue to a GitHub project at https://github.com/orgs/puppetlabs/projects/64/views/1?pane=issue&itemId=34966871", "created": "2023-08-03T06:40:00.000000"}], "components": [], "created": "2018-11-26T11:08:00.000000", "creator": "557058:68216498-c6bf-4e8f-92c1-ac84887b7c02", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@406d0338"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": 
"com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzrcgf:"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "26/Nov/18"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_147810724464_*|*_6_*:*_1_*:*_0"}], "description": "When testing the cisco-nxos-9k images on vmpooler, I noticed that the images have not fully booted before being handed out by vmpooler. Usually this is not a problem, as the images are sitting around for a while before they're being used. 
For reasons of reliability I'd definitely would like vmpooler to stop this though.\n\nIs there a way to add a health check so that vmpooler does not hand out half-booted images?", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10061", "fixedVersions": [], "id": "10061", "issueType": "Improvement", "key": "POOLER-135", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:68216498-c6bf-4e8f-92c1-ac84887b7c02", "resolution": "Incomplete", "resolutionDate": "2023-08-03T06:40:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "03/Aug/23 6:40 AM", "summary": "health checks for images", "timeSpent": "PT0S", "updated": "2023-08-03T06:40:00.000000", "votes": "1", "watchers": []}, {"affectedVersions": [], "assignee": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "attachments": [], "comments": [{"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "I've put up a PR for this that should help us get this going.", "created": "2018-12-07T14:00:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "This has been enabled. 
I've started to build out an initial dashboard to explore the stats\u00a0http://grafana.ops.puppetlabs.net/dashboard/db/vmpooler-usage?from=now-1h&to=now&editorTab=Metrics", "created": "2018-12-10T18:26:00.000000"}], "components": ["VM Pooler"], "created": "2018-11-06T14:46:00.000000", "creator": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@f1546ce"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzr0o7:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "3.0"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_52457_*|*_10007_*:*_1_*:*_438602399_*|*_3_*:*_1_*:*_184362745_*|*_6_*:*_1_*:*_0_*|*_10006_*:*_2_*:*_2327774449"}], "description": "QE wants to be able to identify how many vmpooler SUTs jenkins jobs are using. 
Vmpooler currently writes the job URL to the redis data stored for the VM, so this data is available currently, but we do not ship it to graphite. There are a few considerations when implementing this change to ensure the data is useful for us.\n * Identify SUTs allocated by ABS vs adhoc\n * Parse the job URL, when present, to identify the instance, value stream, project and branch name\n * Increment counters for running SUT for the appropriate instance > value stream > project > branch path\n * For any VM that does not have a jenkins job URL we should group it into an additional group for easy correlation\n * We may want a switch to allow for enabling / disabling this stat\n * I think this data is set at checkout, or when the VM is moved from ready to running. If it's reasonable we may consider parsing and shipping the data at the same time we're writing to redis to ensure we're not reading back the same data if it's avoidable", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10181", "fixedVersions": [], "id": "10181", "issueType": "New Feature", "key": "POOLER-134", "labels": ["customer_facing"], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "resolution": "Fixed", "resolutionDate": "2018-12-10T18:26:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Send stats to graphite to support correlating jenkins jobs with the number of SUTs used", "timeSpent": "PT0S", "updated": "2018-12-10T18:26:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "attachments": [], "comments": [{"author": 
"557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "I've fixed this issue and put up a PR. I tested the fix locally and validated that a machine identified as having failed is successfully removed from the ready queue.", "created": "2018-12-03T13:23:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "The fix landed. I'll get this released in the next release along with POOLER-134.", "created": "2018-12-05T16:35:00.000000"}], "components": [], "created": "2018-10-18T09:53:00.000000", "creator": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@75237dd3"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Method Found", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzqmxz:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", 
"value": "1.0"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_350730097_*|*_10007_*:*_1_*:*_184380763_*|*_3_*:*_1_*:*_441983378_*|*_10009_*:*_1_*:*_3631_*|*_6_*:*_1_*:*_0_*|*_10005_*:*_1_*:*_3197837784"}], "description": "Since 0.2.2 release of vmpooler there is an issue where the application will identify a machine as having failed because it is no longer responding. When this happens there is a log message about the machine being identified as no longer responding, and being removed from the ready queue, but the pool parameter is an empty string in the log. As a result, I suspect the attempt to move the VM queue is failing, so we see the log error, but the VM stays in the pool.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10180", "fixedVersions": [], "id": "10180", "issueType": "Bug", "key": "POOLER-133", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Major", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "resolution": "Fixed", "resolutionDate": "2018-12-05T16:35:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "vmpooler fails to remove ready VMs that it has identified as failed", "timeSpent": "PT0S", "updated": "2018-12-05T16:35:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "attachments": [], "comments": [{"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "I've put up a PR to fix this. 
Testing locally it fixes the issue.", "created": "2018-10-03T14:09:00.000000"}], "components": [], "created": "2018-10-03T14:03:00.000000", "creator": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@5033c5d2"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Method Found", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzqalz:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "1.0"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_368029_*|*_10007_*:*_2_*:*_424520220_*|*_3_*:*_1_*:*_19897868_*|*_6_*:*_1_*:*_0"}], "description": "When a vmpooler pool is configured with /config/poolsize endpoint it sets data in redis to override the value specified via the configuration 
file. Once set this value takes precedence over the statically configured value. However, when the API portion of the application is restarted it does not reflect the redis configured value until interacting with the endpoint again. This should stay consistent as soon as the application starts. As a benefit this would allow for running multiple instances of vmpooler API if desired.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10143", "fixedVersions": [], "id": "10143", "issueType": "Bug", "key": "POOLER-132", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "resolution": "Fixed", "resolutionDate": "2018-10-08T17:36:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Pool size does not display correctly when set via endpoint after application restart", "timeSpent": "PT0S", "updated": "2018-10-08T17:36:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "attachments": [], "comments": [{"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "I made this change so vmpooler can return the VMs with the label requested. I cleaned up some variable usage to make clear our intent while doing so. 
A PR is up for review.", "created": "2018-09-27T13:07:00.000000"}], "components": ["VM Pooler"], "created": "2018-09-24T12:45:00.000000", "creator": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@11d6c9c0"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Method Found", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzq2gv:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "3.0"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_30654_*|*_10007_*:*_1_*:*_420524411_*|*_3_*:*_1_*:*_5938879_*|*_5_*:*_1_*:*_0_*|*_10006_*:*_1_*:*_254528199"}], "description": "When a VM is retrieved from vmpooler by using an alias the pool title is returned, whether that title matches the 
alias used to retrieve the VM or not. When requesting multiple VMs this could mean that requested a pool that has an alias by that name as well could return two different groups of VMs. This causes a problem for some clients like beaker-vmpooler that are just expecting a group of hosts that match the requirements. Vmpooler should be updated to return the name of the platform requested instead of the name of the pool backing what is returned. This information is still available to the user if the user requests information about their VMs, or the VM itself from vmpooler.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10179", "fixedVersions": [], "id": "10179", "issueType": "Bug", "key": "POOLER-131", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "resolution": "Fixed", "resolutionDate": "2018-10-02T09:55:00.000000", "status": "Resolved", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Returned VMs should describe the requested alias instead of VM source", "timeSpent": "PT0S", "updated": "2018-10-02T09:55:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [{"author": "557058:d266d245-5cba-4a99-842a-d1e061513459", "body": "[~accountid:557058:295d7a84-a09b-4348-8961-a1e1764c190e] Moved to QE as InfraCore has nothing to do with the delta disks.", "created": "2018-08-30T06:17:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "It looks like this is a new behavior since the move to 6.5. This bit of code hasn't changed since the delta disk creation capability was introduced to vmpooler. 
Right now it keeps track of the template for a given pool and if that changes then creates delta disks for the template. It does not track what the old value was for a template so when a template change is reverted it runs the delta disk creation again. This wasn't a problem before, but appears to have become an issue with 6.5. I believe the solution is to track all templates that have had delta disks created and not repeat the operation.", "created": "2018-08-30T08:58:00.000000"}, {"author": "557058:295d7a84-a09b-4348-8961-a1e1764c190e", "body": "Welcome to the wonderful world of Esx 6.5 !!!!", "created": "2018-08-30T09:33:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "I have put up a change for review that will fix this in a few different ways.\n# It will make a greater effort to not run delta disk creation for a template that has been used before\n# If template delta disk creation fails it will be logged and the template will still be marked as usable\n\nI think this change should reflect how we would like this to work, and stop the pain of this when it fails.", "created": "2018-08-30T16:55:00.000000"}], "components": [], "created": "2018-08-30T06:09:00.000000", "creator": "557058:295d7a84-a09b-4348-8961-a1e1764c190e", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@7a94c917"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": 
"Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Method Found", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hykprz:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "3.0"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "30/Aug/18"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_2633660_*|*_10007_*:*_1_*:*_406767444_*|*_10009_*:*_1_*:*_87553426_*|*_5_*:*_1_*:*_0_*|*_10005_*:*_1_*:*_36054407"}], "description": "Have marked this \"Critical\", but its close to a blocker.\n\nI had to a revert on all the Windows August 2018 images at short notice using the {{platform-ci-utils}} command (sample below):\n{noformat}\nplatform-ci-utils imaging-update-vmpooler-pool-templates --instance=ci --pools=win-2016-core-x86_64:templates/win-2016-core-x86_64-20180615_PROD\n{noformat}\n\nIf the delta disk already exists (which it will in reversion), the pool can't be reverted.\nThe worker thread in the vmpooler log appears to die with log similar to:\n{noformat}\n[2018-08-30 11:45:41] [*] [win-10-pro-x86_64] template updated from templates/win-10-pro-x86_64-20180815_PROD to templates/win-10-pro-x86_64-20180615_PROD\n[2018-08-30 11:45:41] [*] [win-10-pro-x86_64] preparing pool template for deployment\n[2018-08-30 11:45:41] [!] 
[win-10-pro-x86_64] Error while checking the pool: InvalidRequest: \nSingle property deltaDiskFormatVariant provided multiple times\n\nwhile parsing property \"deltaDiskFormatVariant\" of static type string\n\nwhile parsing serialized DataObject of type vim.vm.device.VirtualDisk.FlatVer2BackingInfo\nat line 1, column 5974\n\nwhile parsing property \"parent\" of static type VirtualDiskFlatVer2BackingInfo\n\nwhile parsing serialized DataObject of type vim.vm.device.VirtualDisk.FlatVer2BackingInfo\nat line 1, column 5559\n\nwhile parsing property \"parent\" of static type VirtualDiskFlatVer2BackingInfo\n\nwhile parsing serialized DataObject of type vim.vm.device.VirtualDisk.FlatVer2BackingInfo\nat line 1, column 5144\n{noformat}\n\n\nSo in order to workaround, you need to do the following manual procedure for each template:\n# Rename template to [name]_RB\n# Clone [name]_RB => [name] - remembering all the manual steps correctly.\n# Queue the revert\n\nHad to do this for all templates this morning, so was a fun morning.\n\n", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10178", "fixedVersions": [], "id": "10178", "issueType": "Bug", "key": "POOLER-130", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Critical", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:295d7a84-a09b-4348-8961-a1e1764c190e", "resolution": "Fixed", "resolutionDate": "2018-09-05T10:13:00.000000", "status": "Resolved", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Configuring a template that has been configured previously causes delta disk errors", "timeSpent": "PT0S", "updated": "2018-09-05T10:13:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", 
"attachments": [], "comments": [{"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "I have put up a PR that adds this capability. When digging into this I discovered that the alias stuff we rely on to support multiple backends for a single pool in vmpooler is pretty fragile and works only in our specific case. For example, it would not work the same way if we add a third backend. In addition to the primary focus there are a couple of secondary items that are addressed in order to facilitate local testing.\n* When checking a ready VM domain is passed in if configured. Without this a short name is always used.\n* Docker-compose is updated to use a alternate dockerfile that builds the vmpooler gem locally in the container from source and installs from that instead of the published gem\n* When a pool has a matching alias and all backends do not have a weight configured then the pool is selected randomly from the available templates.\n* Ensure default ttl is set for check_ready_pool_vm from check_pool\n\nThis change is up for review now.\n\nI was a little torn because I think it would make sense to work on aliases a little further so they work in a way that is easier to explain and understand. As it is now only a single alias can exist for a pool. If the alias is specified as an alias to multiple pools only the last one in the configuration file is set as that alias. This should get updated to work with an arbitrary number of aliases, which would let us do something like say checkout a RHEL VM, and have multiple pools tagged with a RHEL alias.\n\nBecause of this there aren't tests for the new features yet. 
I could add some that test exactly what is possible now, but it may make more sense to just fix this up and submit the improvement as a separate change.", "created": "2018-09-13T11:03:00.000000"}], "components": [], "created": "2018-08-15T10:46:00.000000", "creator": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@4c4af36d"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "Epic Link", "fieldType": "com.pyxis.greenhopper.jira:gh-epic-link", "value": "POOLER-99"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzp847:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "3.0"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_602643970_*|*_10007_*:*_1_*:*_428847948_*|*_3_*:*_1_*:*_693702393_*|*_10009_*:*_2_*:*_86089229_*|*_5_*:*_1_*:*_0_*|*_10006_*:*_1_*:*_1210272882"}], 
"description": "When a vmpooler platform has multiple backends it is possible for a SUT to be retrieved from either backend when the VM is requested. Vmpooler should offer a way to weight these decisions so cluster1 could receive 60% of allocations and cluster2 40% of allocations when the platform has multiple backends. This would cluster utilization to be set by the administrator and get a little more predictability about the spread of VMs, which in turn should allow increasing the maximum number of running VMs while maintaining stability.", "epicLinkSummary": "All optimization and improvements tickets targeted for 2018", "estimate": "PT0S", "externalId": "10149", "fixedVersions": [], "id": "10149", "issueType": "Improvement", "key": "POOLER-129", "labels": [], "originalEstimate": "PT0S", "parent": "10083", "parentSummary": "All optimization and improvements tickets targeted for 2018", "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "resolution": "Fixed", "resolutionDate": "2018-09-19T10:06:00.000000", "status": "Resolved", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Allow settings weights for backends", "timeSpent": "PT0S", "updated": "2018-09-19T10:06:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [{"author": "557058:295d7a84-a09b-4348-8961-a1e1764c190e", "body": "Hmm - I think this would mean a fairly large change in {{vmpooler}} but again would need [~accountid:557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72] to comment.", "created": "2019-05-07T10:12:00.000000"}, {"author": "6220db96c4d0fe0069535219", "body": "Migrating issue to a GitHub project at 
https://github.com/orgs/puppetlabs/projects/64/views/1?pane=issue&itemId=34966855", "created": "2023-08-03T06:44:00.000000"}], "components": [], "created": "2018-07-27T07:08:00.000000", "creator": "557058:d266d245-5cba-4a99-842a-d1e061513459", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@2883560b"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzojtb:"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "07/May/19"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_158369779481_*|*_6_*:*_1_*:*_0"}], "description": "The current process produces virtual machines that get used like templates... 
my request is that actual templates are created.\n\nPinging [~accountid:557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72] in case this request would have any impact on vmpooler", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10167", "fixedVersions": [], "id": "10167", "issueType": "New Feature", "key": "POOLER-201", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:d266d245-5cba-4a99-842a-d1e061513459", "resolution": "Incomplete", "resolutionDate": "2023-08-03T06:44:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "03/Aug/23 6:44 AM", "summary": "Mark templates as templates in vCenter", "timeSpent": "PT0S", "updated": "2023-08-03T06:44:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "attachments": [], "comments": [{"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "I have put up a change for review and I think it's ready to roll.", "created": "2018-07-23T16:59:00.000000"}], "components": [], "created": "2018-07-23T16:56:00.000000", "creator": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@3c9cae4a"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, 
{"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzoesv:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "1.0"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_199093_*|*_10007_*:*_1_*:*_151724808_*|*_5_*:*_1_*:*_0"}], "description": "Vmpooler recently added per VM mutex objects to prevent VM checks from happening simultaneously across threads. These objects are referenced in an instance variable, and the reference to them is never removed, which means they cannot be garbage collected and vmpooler memory slowly bloats over time. 
We should dereference these objects.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10107", "fixedVersions": [], "id": "10107", "issueType": "Improvement", "key": "POOLER-128", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "resolution": "Fixed", "resolutionDate": "2018-07-25T11:08:00.000000", "status": "Resolved", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Remove references to mutex objects when done with them", "timeSpent": "PT0S", "updated": "2018-07-25T11:08:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "attachments": [], "comments": [{"author": "557058:cd25c829-41d1-41be-b9ec-df76c04b00c2", "body": "The new release job created in our mobbing session should allow vmpooler docker images to be build automatically for a release.  
Closing ticket.", "created": "2018-08-31T10:45:00.000000"}], "components": [], "created": "2018-07-11T16:49:00.000000", "creator": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@6f540aed"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzo72v:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "1.0"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "31/Aug/18"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_4384558636_*|*_5_*:*_1_*:*_0"}], "description": "We should build docker images for vmpooler release tags automatically. 
We could alternately do this for commits to master, but this may take up an excessive amount of space.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10166", "fixedVersions": [], "id": "10166", "issueType": "Improvement", "key": "POOLER-127", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "resolution": "Fixed", "resolutionDate": "2018-08-31T10:45:00.000000", "status": "Resolved", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Build new docker image versions automatically on release tags", "timeSpent": "PT0S", "updated": "2018-08-31T10:45:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:5e3ab1ea-c44e-4f59-85de-75f38ca423f1", "attachments": [{"attacher": "557058:56abc7e4-6462-4177-8eb0-d77979538f8d", "created": "2018-07-09T06:19:00.000000", "name": "Screen Shot 2018-07-09 at 1.17.19 PM.png", "uri": "https://puppet.atlassian.net/rest/api/3/attachment/content/10007"}, {"attacher": "557058:56abc7e4-6462-4177-8eb0-d77979538f8d", "created": "2018-07-24T06:13:00.000000", "name": "Screen Shot 2018-07-24 at 1.11.50 PM.png", "uri": "https://puppet.atlassian.net/rest/api/3/attachment/content/10003"}], "comments": [{"author": "557058:5e3ab1ea-c44e-4f59-85de-75f38ca423f1", "body": "It looks like storage for the vmpooler instances has filled up. There are tons and tons of old powered off instances on disk, working on clearing those to free space.", "created": "2018-07-09T07:49:00.000000"}, {"author": "557058:5e3ab1ea-c44e-4f59-85de-75f38ca423f1", "body": "We're proceeding with purging existing instances. 
Any jobs that were ongoing should have completed by now, so we don't expect any adverse effects. All templates and anything that didn't look like a pooler spawned VM should be retained.\n\nThere were roughly 3000 VMs in inventory at the time the vmpooler was shut down, and we found around 45000 on disk, which is surely a contributor to the problem here... Following deletion of the instances on disk we'll purge the vCenter's inventory database. Once that's done vmpooler can be restarted and pools should replenish.\u00a0", "created": "2018-07-09T11:45:00.000000"}, {"author": "63d40635a05386069cdb69d6", "body": "Are we running this through ICS or anything? ", "created": "2018-07-09T12:12:00.000000"}, {"author": "557058:5e3ab1ea-c44e-4f59-85de-75f38ca423f1", "body": "[~accountid:63d40635a05386069cdb69d6] I considered starting the ICS process but elected not to spin it up since the number of people required to address the problem was so small. I was able to immediately link up with [~accountid:557058:d266d245-5cba-4a99-842a-d1e061513459] and we figured out our plan for corrective action quickly. Formal ICS might've been helpful in answering the questions I had about the vmpooler app, but that came up a bit later after we'd already started digging in to the problem and mainly just depended on the right people popping onto Hipchat.", "created": "2018-07-09T12:58:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "It looks like things started working again with vmware-vc6 on vmpooler-cinext so I fired up hornet as well so adhoc testing capacity would be available as well. I think that a byproduct of this fix approach is that any long-running instances folks may have had with vmpooler/hornet will have been destroyed. 
I'm not sure if that's something we should inform folks about specifically, but it's perhaps something we should consider doing.", "created": "2018-07-09T13:55:00.000000"}, {"author": "557058:5e3ab1ea-c44e-4f59-85de-75f38ca423f1", "body": "Okay, the cleanup work is done and vmpooler has been restarted. It looks like the pools are now replenishing, and we have lots of free disk on the filer. Will keep this ticket open through the day just in case anything else crops up.", "created": "2018-07-09T14:08:00.000000"}, {"author": "557058:56abc7e4-6462-4177-8eb0-d77979538f8d", "body": "Reopening as this is being seen again, I have attached an image with the status of the pooler. ", "created": "2018-07-24T06:14:00.000000"}, {"author": "557058:d266d245-5cba-4a99-842a-d1e061513459", "body": "This is not related to the current issues. The current issue appears to be vCenter related. Tracking the new issue in\u00a0INFC-17380", "created": "2018-07-24T06:53:00.000000"}], "components": [], "created": "2018-07-09T06:20:00.000000", "creator": "557058:56abc7e4-6462-4177-8eb0-d77979538f8d", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Acceptance Criteria", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:textarea", "value": "- Pools are refilled"}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@ab01f3a"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": 
"com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Method Found", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzo4f3:"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "09/Jul/18"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_23896348_*|*_3_*:*_1_*:*_104507644_*|*_4_*:*_1_*:*_2360560_*|*_6_*:*_2_*:*_1167217037"}], "description": "When trying to run tests locally I am seeing the following: \n\n{code}\n\u279c  puppetlabs-mysql git:(ma_loc_testing) \u2717 BEAKER_PUPPET_COLLECTION=puppet5 PUPPET_INSTALL_TYPE=agent BEAKER_debug=true BEAKER_PE_DIR=http://enterprise.delivery.puppetlabs.net/2018.1/ci-ready BEAKER_PE_VER='2018.1.3-rc0-167-g6b11e98' BEAKER_PUPPET_AGENT_VERSION=5.3.5 BEAKER_TESTMODE=apply TEST_FRAMEWORK=beaker-rspec BEAKER_set=a bundle exec rspec spec/acceptance/locales_spec.rb\n/Users/paula/workspace/puppetlabs-mysql/.bundle/gems/ruby/2.5.0/gems/beaker-rspec-6.2.3/lib/beaker-rspec/helpers/serverspec.rb:43: warning: already initialized constant Module::VALID_OPTIONS_KEYS\n/Users/paula/workspace/puppetlabs-mysql/.bundle/gems/ruby/2.5.0/gems/specinfra-2.67.3/lib/specinfra/configuration.rb:4: warning: previous definition of VALID_OPTIONS_KEYS was here\nHypervisor for redhat7-64-1 is vmpooler\nBeaker::Hypervisor, found some vmpooler boxes to create\nRequesting VM set from vmpooler (with authentication token)\nFailed vmpooler provision: RuntimeError : 
Vmpooler.provision - response from pooler not ok. Requested host set [\"redhat-7-x86_64\"] not available in pooler.\n{\"ok\"=>false}\nRetrying provision for vmpooler host after waiting 1 second(s)\nRequesting VM set from vmpooler (with authentication token)\nFailed vmpooler provision: RuntimeError : Vmpooler.provision - response from pooler not ok. Requested host set [\"redhat-7-x86_64\"] not available in pooler.\n{\"ok\"=>false}\nRetrying provision for vmpooler host after waiting 1 second(s)\nRequesting VM set from vmpooler (with authentication token)\nFailed vmpooler provision: RuntimeError : Vmpooler.provision - response from pooler not ok. Requested host set [\"redhat-7-x86_64\"] not available in pooler.\n{\"ok\"=>false}\nRetrying provision for vmpooler host after waiting 2 second(s)\nRequesting VM set from vmpooler (with authentication token)\nFailed vmpooler provision: RuntimeError : Vmpooler.provision - response from pooler not ok. Requested host set [\"redhat-7-x86_64\"] not available in pooler.\n{\"ok\"=>false}\nRetrying provision for vmpooler host after waiting 3 second(s)\n^C\nRSpec is shutting down and will print the summary report... Interrupt again to force quit.\n{code}\n\nwhen looking into http://vmpooler.delivery.puppetlabs.net/dashboard/ most pools are empty. See screenshot attached. 
", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10059", "fixedVersions": [], "id": "10059", "issueType": "Bug", "key": "POOLER-126", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Blocker", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:56abc7e4-6462-4177-8eb0-d77979538f8d", "resolution": "Fixed", "resolutionDate": "2018-07-24T06:53:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "vmpooler: Most resources are empty on the pooler", "timeSpent": "PT0S", "updated": "2018-07-24T06:53:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "attachments": [], "comments": [{"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "It turns out our graphite support in vmpooler is pretty raw. We're sending plaintext to graphite directly. We should probably use a library.", "created": "2018-07-23T17:05:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "The migration stats were in fact shipped. However, it was no longer necessary to reference 'count' within the metric tree. 
I've updated the dashboard accordingly.", "created": "2018-07-25T11:55:00.000000"}], "components": [], "created": "2018-07-05T12:00:00.000000", "creator": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@44d36481"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "Epic Link", "fieldType": "com.pyxis.greenhopper.jira:gh-epic-link", "value": "POOLER-99"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Method Found", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hz1van:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "1.0"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_1725121378_*|*_6_*:*_1_*:*_0_*|*_10006_*:*_1_*:*_2579043"}], "description": "Since using the graphite metrics provider and jruby 9k 
we no longer get migration stats. This ticket serves to track sorting out why we no longer receive these statistics.", "epicLinkSummary": "All optimization and improvements tickets targeted for 2018", "estimate": "PT0S", "externalId": "10058", "fixedVersions": [], "id": "10058", "issueType": "Bug", "key": "POOLER-125", "labels": [], "originalEstimate": "PT0S", "parent": "10083", "parentSummary": "All optimization and improvements tickets targeted for 2018", "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "resolution": "Fixed", "resolutionDate": "2018-07-25T11:55:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Migration stats are not shipped when using graphite metrics provider", "timeSpent": "PT0S", "updated": "2018-07-25T11:55:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "attachments": [], "comments": [], "components": [], "created": "2018-07-03T12:11:00.000000", "creator": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@74b66f5e"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": 
"com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Method Found", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzo2c7:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "1.0"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_12680_*|*_3_*:*_1_*:*_775429_*|*_10009_*:*_1_*:*_595905824_*|*_5_*:*_1_*:*_0"}], "description": "Ticket for [https://github.com/puppetlabs/vmpooler/issues/199]\u00a0.\u00a0", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10056", "fixedVersions": [], "id": "10056", "issueType": "Bug", "key": "POOLER-124", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "resolution": "Fixed", "resolutionDate": "2018-07-10T09:56:00.000000", "status": "Resolved", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Setting max_tries should not go in infinite loop", "timeSpent": "PT0S", "updated": "2018-07-10T09:56:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": 
"557058:9479147e-472e-492f-9d5f-2788ee2dd8d0", "attachments": [], "comments": [{"author": "557058:79f2fdd6-baee-43c4-91af-c7e647173c95", "body": "FWIW, I'd vote for one week is the max TTL. I think one day could be too short, there has been a scenario where I've had to extend a VM I was using at the end of the day in order to use it the next day. ", "created": "2019-12-02T10:24:00.000000"}, {"author": "557058:9479147e-472e-492f-9d5f-2788ee2dd8d0", "body": "I see two scenarios: \n1) Someone checkouts a vm and set an impossible long lifetime - which essentially keeps it running forever\n2) Someone keeps changing the lifetime to extend it, running it for extended periods of time\n\nI'll make sure to cover both cases and have a config setting for that absolute max TTL upper limit.", "created": "2019-12-02T10:35:00.000000"}, {"author": "557058:9479147e-472e-492f-9d5f-2788ee2dd8d0", "body": "Vmpooler code has been merged and deployed.\nWe still need to merge the plm code to start using the feature:\nhttps://github.com/puppetlabs/puppetlabs-modules/pull/9488", "created": "2019-12-13T07:19:00.000000"}], "components": ["VM Pooler"], "created": "2018-07-02T11:21:00.000000", "creator": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@3b7a1047"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": 
"com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzo0x3:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "1.0"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "02/Dec/19"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_43117322528_*|*_10007_*:*_1_*:*_246928212_*|*_3_*:*_1_*:*_10685463_*|*_10009_*:*_1_*:*_2245244054_*|*_5_*:*_1_*:*_0_*|*_10005_*:*_2_*:*_1637755267"}], "description": "Vmpooler should not allow a user to extend a VM lifetime forever. 
An operator should be able to configure an upper bounds that is reasonable this and discourages running anything long running with vmpooler provisioned machines.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10148", "fixedVersions": [], "id": "10148", "issueType": "Improvement", "key": "POOLER-123", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "resolution": "Fixed", "resolutionDate": "2019-12-31T09:34:00.000000", "status": "Resolved", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Implement a max TTL", "timeSpent": "PT0S", "updated": "2020-03-23T14:01:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [{"author": "6220db96c4d0fe0069535219", "body": "Migrating issue to a GitHub project at [https://github.com/orgs/puppetlabs/projects/64/views/1?pane=issue&itemId=34966846|https://github.com/orgs/puppetlabs/projects/64/views/1?pane=issue&itemId=34966846]", "created": "2023-08-03T06:38:00.000000"}], "components": [], "created": "2018-06-27T15:21:00.000000", "creator": "557058:d494f5f9-7e70-4298-bf72-c8f9e7cca4db", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@313991dd"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": 
"com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hznyn3:"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "03/Aug/23"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_160931789593_*|*_6_*:*_1_*:*_0"}], "description": "Currently, the config/pooltemplate and config/poolsize return a status code of 200 to indicate that no changes were made, 201 to indicate that changes were made. It would be useful to extend these endpoints to return the updated pools in the response, and use a single status code indicating success. This way, we can see any config. 
changes at the granularity of a single pool.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10086", "fixedVersions": [], "id": "10086", "issueType": "Improvement", "key": "POOLER-122", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:d494f5f9-7e70-4298-bf72-c8f9e7cca4db", "resolution": "Incomplete", "resolutionDate": "2023-08-03T06:38:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "03/Aug/23 6:38 AM", "summary": "Extend config/pooltemplate + config/poolsize to return updated pools in response", "timeSpent": "PT0S", "updated": "2023-08-03T06:38:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [{"author": "6220db96c4d0fe0069535219", "body": "Migrating issue to a GitHub project at https://github.com/orgs/puppetlabs/projects/64/views/1?pane=issue&itemId=34966836", "created": "2023-08-03T06:51:00.000000"}], "components": ["VM Pooler"], "created": "2018-06-25T10:04:00.000000", "creator": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@6e910c17"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": 
"com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Method Found", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hznwa7:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "3.0"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "03/Aug/23"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_161124414835_*|*_6_*:*_1_*:*_0"}], "description": "When a host or VM is disconnected vmpooler cannot complete check pool operations because it always tries to discover first, and discovery explodes when it cannot communicate with a VM. 
This situation should be handled more gracefully to allow for pools to stay replenished when there are host or VM issues.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10052", "fixedVersions": [], "id": "10052", "issueType": "Bug", "key": "POOLER-121", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "resolution": "Incomplete", "resolutionDate": "2023-08-03T06:51:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "03/Aug/23 6:51 AM", "summary": "vmpooler should gracefully handle disconnected VMs and hosts", "timeSpent": "PT0S", "updated": "2023-08-03T06:51:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [], "components": ["VM Pooler"], "created": "2018-06-23T10:14:00.000000", "creator": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@291a5204"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, 
{"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hznw1b:"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_8826298155_*|*_6_*:*_1_*:*_0"}], "description": "The centos 6 and 7 pools in vsphere-vc6 provider are not able to fill pools consistently with a 5 minute timeout. This value should probably be higher, perhaps 15 minutes, to reflect that vc6 is not performant enough to bring VMs from clone to ready in 5 minutes when under heavy load during PDX nights and over the weekend. This value is probably fine at pix since the instance is much more performant and less timeouts occur.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10048", "fixedVersions": [], "id": "10048", "issueType": "Improvement", "key": "POOLER-120", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "resolution": "Fixed", "resolutionDate": "2018-10-03T13:59:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "centos6 and 7 pools on vc6 provider have timeout that is too short", "timeSpent": "PT0S", "updated": "2018-10-03T13:59:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [{"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "Thanks 
[~accountid:557058:56abc7e4-6462-4177-8eb0-d77979538f8d] for creating the ticket. We had made some changes to the disk adding methods that were put in place on Wednesday and rolled back Thursday do to some issues. Can you please link me to the job where this failed?\n\n\u00a0\n\nNevermind, I see this is via beaker, not via jenkins.", "created": "2018-06-22T10:10:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "Reviewing the log for this it looks like vmpooler does not have record of 'cxhjg8eup4qtgmi' having created the disk. For 'w0ve3u23wvsbkrk' it looks like the disk was created, but that the creation was not completed for some time. Reviewing logs in kibana I see the request to add the disk coming across per vmpooler at 6:00 and then being completed at 6:05. It shows it took 330 seconds to add the disk. The current disk adding method is quite expensive due to how it looks up something on the datastore about the VM, and as a result can take a long time. It looks like beaker in this case gave up on the disk before it was provisioned, though in the first case I don't see where it acknowledged or attempted to created the disk based on the request.\n\nLink to logs for '[w0ve3u23wvsbkrk|http://kibana.ops.puppetlabs.net/#/discover?_g=(refreshInterval:(display:Off,pause:!f,section:0,value:0),time:(from:now-12h,mode:quick,to:now))&_a=(columns:!(message),index:%5Blogspout-%5DYYYY.MM,interval:auto,query:(query_string:(analyze_wildcard:!t,query:'docker.image:*vmpooler%20AND%20message:*w0ve3u23wvsbkrk*')),sort:!('@timestamp',desc))]'.\n\nWe are working on improving the performance of these methods, but I had to roll back an improvement that was implemented Wednesday due to some issues, so we're using the same disk adding methods we have been for some time now. It's possible that the load of vcenter had to do with why this instance took so long to respond. 
In any case, I hope we can get it working so the tests can work.\n\n[~accountid:557058:56abc7e4-6462-4177-8eb0-d77979538f8d], if you try again to see if these failures persistent?", "created": "2018-06-22T10:48:00.000000"}, {"author": "557058:a7f95b1d-db7a-4dda-b600-bb7feec74396", "body": "Also seeing these errors locally with the following output, line 15 points towards vmpooler.add_disk method failing:\n\n\u00a0\n{code:java}\n\u00a0\n\u279c puppetlabs-satellite_pe_tools git:(master) \u2717 BEAKER_provision=yes BEAKER_PE_VER='2018.1.3-rc0-14-g0ed1118' BEAKER_PE_VERSION_FILE='LATEST' TEST_FRAMEWORK=beaker-rspec BEAKER_destroy=no PUPPET_INSTALL_VERSION=2018.1 PUPPET_INSTALL_TYPE=pe BEAKER_PE_DIR=http://enterprise.delivery.puppetlabs.net/2018.1/ci-ready bundle exec rake beaker\n\n\n TEST_TIERS env variable not defined. Defaulting to run all tests.\n/Users/helencampbell/.rbenv/versions/2.3.1/bin/ruby -I/Users/helencampbell/workspace/puppetlabs-satellite_pe_tools/.bundle/gems/ruby/2.3.0/gems/rspec-core-3.7.1/lib:/Users/helencampbell/workspace/puppetlabs-satellite_pe_tools/.bundle/gems/ruby/2.3.0/gems/rspec-support-3.7.1/lib /Users/helencampbell/workspace/puppetlabs-satellite_pe_tools/.bundle/gems/ruby/2.3.0/gems/rspec-core-3.7.1/exe/rspec spec/acceptance --color\n/Users/helencampbell/workspace/puppetlabs-satellite_pe_tools/.bundle/gems/ruby/2.3.0/gems/beaker-rspec-6.2.3/lib/beaker-rspec/helpers/serverspec.rb:43: warning: already initialized constant Module::VALID_OPTIONS_KEYS\n/Users/helencampbell/workspace/puppetlabs-satellite_pe_tools/.bundle/gems/ruby/2.3.0/gems/specinfra-2.67.3/lib/specinfra/configuration.rb:4: warning: previous definition of VALID_OPTIONS_KEYS was here\n\n\n Beaker::Hypervisor, found some vmpooler boxes to create \n Warning: Psych::SyntaxError: Credentials file (/Users/helencampbell/.fog) has invalid syntax", "created": "2018-06-25T03:17:00.000000"}, {"author": "557058:56abc7e4-6462-4177-8eb0-d77979538f8d", "body": "Hey 
[~accountid:557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72],\n\nThat sounds correct, we only request to add additional disk space to one of the machine and in this case it is 'w0ve3u23wvsbkrk.\n\nI have seen this consistently failing locally, and [here|https://jenkins-master-prod-1.delivery.puppetlabs.net/view/modules/view/linux/view/satellite-pe-tools/view/master/job/forge-module_puppetlabs-satellite-pe-tools_intn-sys_full-pe-previous-master/PLATFORM=redhat7-64mcda-64satellite.%257Bdisks=%255B16%255D%257D,WORKER_LABEL=beaker/3/console] is a link to our Jenkins job where it has failed multiple times.\n\nThank you for taking a look at this :-)", "created": "2018-06-25T03:17:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "I re-implemented some changes in vmpooler that should make disk add operations much faster and more reliable. Can you let me know if this works next time you're able to test?", "created": "2018-06-25T12:01:00.000000"}, {"author": "557058:56abc7e4-6462-4177-8eb0-d77979538f8d", "body": "Hey [~accountid:557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72]\n\nI have ran locally and [~accountid:557058:a7f95b1d-db7a-4dda-b600-bb7feec74396] has rekicked Jenkins and I can confirm it has been resolved.\nIt also only takes 10.90seconds to add a disk which is super fast and great!\n\nWe would both like to thank you for such a quick turn around on this ticket it is really appreciated! 
\n\n:-) ", "created": "2018-06-26T02:28:00.000000"}], "components": ["VM Pooler"], "created": "2018-06-22T07:16:00.000000", "creator": "557058:56abc7e4-6462-4177-8eb0-d77979538f8d", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Acceptance Criteria", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:textarea", "value": "It is possible to add disks "}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@42098748"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Method Found", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hznu7r:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "2.0"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "22/Jun/18"}, {"fieldName": "[CHART] Time in Status", "fieldType": 
"com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_7983293_*|*_3_*:*_1_*:*_263957492_*|*_10009_*:*_1_*:*_52042775_*|*_5_*:*_1_*:*_0_*|*_10006_*:*_1_*:*_4366210"}], "description": "When running tests against a nodeset that requests 16GB of disk size it times out and is unable to extend add the disk to the machine. I can confirm this was running with no issues yesterday.\n\n*+Expected Results+* \n16GB disk is added to machine\n\n*+Actual Results+*\n16GB disk is not added to the machine\n\n*+Logs+*\n{code}\nBeaker::Hypervisor, found some vmpooler boxes to create\nRequesting VM set from vmpooler (with authentication token)\nUsing available host 'cxhjg8eup4qtgmi.delivery.puppetlabs.net' (redhat7-64-1)\nUsing available host 'w0ve3u23wvsbkrk.delivery.puppetlabs.net' (redhat7-64-2)\nSpent 0.71 seconds grabbing VMs\nTagging vmpooler VMs\nSpent 0.93 seconds tagging VMs\nRequesting an additional disk of size 16GB for w0ve3u23wvsbkrk\nWaiting for disk\n....................\n\nAn error occurred while loading ./spec/acceptance/test_spec.rb.\nFailure/Error: require 'beaker-rspec/spec_helper'\nRuntimeError:\n  Could not verify disk was added after 211.49 seconds\n\n# ./.bundle/gems/ruby/2.5.0/gems/beaker-vmpooler-1.2.0/lib/beaker/hypervisor/vmpooler.rb:243:in `block (2 levels) in provision'\n# ./.bundle/gems/ruby/2.5.0/gems/beaker-vmpooler-1.2.0/lib/beaker/hypervisor/vmpooler.rb:234:in `each'\n# ./.bundle/gems/ruby/2.5.0/gems/beaker-vmpooler-1.2.0/lib/beaker/hypervisor/vmpooler.rb:234:in `each_with_index'\n# ./.bundle/gems/ruby/2.5.0/gems/beaker-vmpooler-1.2.0/lib/beaker/hypervisor/vmpooler.rb:234:in `block in provision'\n# ./.bundle/gems/ruby/2.5.0/gems/beaker-vmpooler-1.2.0/lib/beaker/hypervisor/vmpooler.rb:227:in `each'\n# ./.bundle/gems/ruby/2.5.0/gems/beaker-vmpooler-1.2.0/lib/beaker/hypervisor/vmpooler.rb:227:in `provision'\n# ./.bundle/gems/ruby/2.5.0/gems/beaker-3.35.0/lib/beaker/hypervisor.rb:41:in `create'\n# 
./.bundle/gems/ruby/2.5.0/gems/beaker-3.35.0/lib/beaker/network_manager.rb:73:in `block in provision'\n# ./.bundle/gems/ruby/2.5.0/gems/beaker-3.35.0/lib/beaker/network_manager.rb:72:in `each_key'\n# ./.bundle/gems/ruby/2.5.0/gems/beaker-3.35.0/lib/beaker/network_manager.rb:72:in `provision'\n# ./.bundle/gems/ruby/2.5.0/gems/beaker-rspec-6.2.3/lib/beaker-rspec/beaker_shim.rb:35:in `provision'\n# ./.bundle/gems/ruby/2.5.0/gems/beaker-rspec-6.2.3/lib/beaker-rspec/spec_helper.rb:50:in `block in <top (required)>'\n# ./.bundle/gems/ruby/2.5.0/gems/beaker-rspec-6.2.3/lib/beaker-rspec/spec_helper.rb:5:in `<top (required)>'\n# ./spec/spec_helper_acceptance.rb:1:in `require'\n# ./spec/spec_helper_acceptance.rb:1:in `<top (required)>'\n# ./spec/acceptance/test_spec.rb:1:in `require'\n# ./spec/acceptance/test_spec.rb:1:in `<top (required)>'\nNo examples found.\n\nFinished in 0.00031 seconds (files took 3 minutes 34.8 seconds to load)\n0 examples, 0 failures, 1 error occurred outside of examples\n{code}\n\nSetting to blocker as this is blocking work that is scheduled for the satellite_pe_tools module\n", "environment": "*Pooler URL*: http://vmpooler.delivery.puppetlabs.net/dashboard/\n*Module:* satellite_pe_tools\n*beaker-hostgenerator string:* redhat7-64mcda-64satellite.%7Bdisks=%5B16%5D%7D\n*Command used to run*: BEAKER_provision=yes BEAKER_PE_VER='2018.1.3-rc0-14-g0ed1118' BEAKER_PE_VERSION_FILE='LATEST' TEST_FRAMEWORK=beaker-rspec BEAKER_set=paula BEAKER_destroy=no PUPPET_INSTALL_VERSION=2018.1 PUPPET_INSTALL_TYPE=pe BEAKER_PE_DIR=http://enterprise.delivery.puppetlabs.net/2018.1/ci-ready bundle exec rake beaker", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10177", "fixedVersions": [], "id": "10177", "issueType": "Bug", "key": "POOLER-119", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Blocker", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", 
"projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:56abc7e4-6462-4177-8eb0-d77979538f8d", "resolution": "Fixed", "resolutionDate": "2018-06-26T02:28:00.000000", "status": "Resolved", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "vmpooler: Fails to add disk", "timeSpent": "PT0S", "updated": "2018-06-26T02:28:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [{"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "This is not advisable for redis, so I'm bailing on this plan.", "created": "2018-06-19T15:37:00.000000"}], "components": ["VM Pooler"], "created": "2018-06-15T16:05:00.000000", "creator": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@3f4351ad"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": 
"0|hznphb:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "1.0"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_343887740_*|*_6_*:*_1_*:*_0"}], "description": "Vmpooler should support setting the redis database number in the redis configuration.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10046", "fixedVersions": [], "id": "10046", "issueType": "Improvement", "key": "POOLER-118", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "resolution": "Won't Do", "resolutionDate": "2018-06-19T15:37:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Add support for specifying the redis database number", "timeSpent": "PT0S", "updated": "2018-06-19T15:37:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [{"author": "6220db96c4d0fe0069535219", "body": "Migrating issue to a GitHub project at https://github.com/orgs/puppetlabs/projects/64/views/1?pane=issue&itemId=34966830", "created": "2023-08-03T06:37:00.000000"}], "components": [], "created": "2018-06-13T16:07:00.000000", "creator": "557058:d494f5f9-7e70-4298-bf72-c8f9e7cca4db", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@595413e6"}, {"fieldName": 
"Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Method Found", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzno2f:"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "03/Aug/23"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_162138574948_*|*_6_*:*_1_*:*_0"}], "description": "For example on vmpooler-dev,\n{code}\nenis:puppetlabs-packer enis.inan$ curl -X GET https://vmpooler-dev.delivery.puppetlabs.net/api/v1/vm/dlw6cc5czsvltdq\n{\n  \"ok\": true,\n  \"dlw6cc5czsvltdq\": {\n    \"template\": \"dev-pool\",\n    \"lifetime\": 1,\n    \"running\": 0.0,\n    \"state\": \"running\",\n    \"ip\": \"10.32.116.75\",\n    \"domain\": \"delivery.puppetlabs.net\"\n  }\n}\n{code}\n\ndev-pool is the pool.\n\nThe endpoint should return the host's template instead of the pool.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10165", "fixedVersions": [], "id": "10165", "issueType": "Bug", "key": "POOLER-117", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, 
"priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:d494f5f9-7e70-4298-bf72-c8f9e7cca4db", "resolution": "Incomplete", "resolutionDate": "2023-08-03T06:37:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "03/Aug/23 6:37 AM", "summary": "vm/hostname endpoint returns the pool name as the host's template", "timeSpent": "PT0S", "updated": "2023-08-03T06:37:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "attachments": [], "comments": [{"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "A job has been configured in ci-job-configs to create vmpooler releases. Once POOLER-109 is merged I plan to deploy a release, which I expect to happen sometime the week of 7/16.", "created": "2018-07-13T21:05:00.000000"}], "components": ["VM Pooler"], "created": "2018-06-11T16:44:00.000000", "creator": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@2225f0ba"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "Epic Link", "fieldType": "com.pyxis.greenhopper.jira:gh-epic-link", "value": "POOLER-99"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", 
"fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hz1vav:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "1.0"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_2568571927_*|*_6_*:*_1_*:*_0_*|*_10006_*:*_1_*:*_541949359"}], "description": "vmpooler has no releases on github. [https://github.com/puppetlabs/vmpooler/issues/262]\u00a0was created to create a release. We should make a release of vmpooler once the current round of improvements is wrapped up.", "epicLinkSummary": "All optimization and improvements tickets targeted for 2018", "estimate": "PT0S", "externalId": "10102", "fixedVersions": [], "id": "10102", "issueType": "Task", "key": "POOLER-116", "labels": [], "originalEstimate": "PT0S", "parent": "10083", "parentSummary": "All optimization and improvements tickets targeted for 2018", "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "resolution": "Done", "resolutionDate": "2018-07-17T16:46:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Create a release of vmpooler", "timeSpent": "PT0S", "updated": "2018-07-17T16:46:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": 
[{"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "We already do this.", "created": "2018-06-25T10:08:00.000000"}], "components": ["VM Pooler"], "created": "2018-06-11T16:16:00.000000", "creator": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@973e276"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hznkw7:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "1.0"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_1187519677_*|*_6_*:*_1_*:*_0"}], "description": "Vmpooler verifies that a VM can be reached via TCP on port 22 before moving it into a ready state. Once in the ready state a variety of checks are performed to ensure the machine exists, but this TCP connection test is not repeated. 
We should consider adding this test to the check of ready VMs and identify when a host can no longer be reached via SSH.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10147", "fixedVersions": [], "id": "10147", "issueType": "Improvement", "key": "POOLER-115", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "resolution": "Fixed", "resolutionDate": "2018-06-25T10:08:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "A ready VM should be checked for SSH availability", "timeSpent": "PT0S", "updated": "2018-06-25T10:08:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "attachments": [], "comments": [{"author": "5ad8bc5f56d6c72b4519625c", "body": "Current mob: [~accountid:557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72],\u00a0 [~accountid:623c0cebbef8a60068c7977d], [~accountid:557058:cd25c829-41d1-41be-b9ec-df76c04b00c2], [~accountid:557058:5d2971d3-3eb2-4f05-ab63-8a307eabd9c4],\u00a0[~accountid:5ad8bc5f56d6c72b4519625c].\n\nWe have tackled refactoring #_check_pool:\u00a0https://github.com/puppetlabs/vmpooler/commits/bring-the-pitchfork\n * Split #create_inventory\n * Split #check_running_pool_vms\n * Split #check_ready_pool_vms\n * Split #check_pending_pool_vms\n * Split #check_completed_pool_vms\n * Split #check_discovered_pool_vms\n * Split #check_migrating_pool_vms\n\nTODO:\n * Split \"repopulate\" phase:\u00a0https://github.com/puppetlabs/vmpooler/blob/5322d715b33f0905a686a40cd1208a42f3af414f/lib/vmpooler/pool_manager.rb#L860\n * Refactor #create_inventory spec tests to match the refactor of 
#check_X_pool_vms style. It's been stubbed: https://github.com/puppetlabs/vmpooler/blob/5322d715b33f0905a686a40cd1208a42f3af414f/spec/unit/pool_manager_spec.rb#L2654\n * Move\u00a0#evaluate_template documentation comment to the method declaration:\u00a0https://github.com/puppetlabs/vmpooler/blob/5322d715b33f0905a686a40cd1208a42f3af414f/lib/vmpooler/pool_manager.rb#L854\n * Rebase commits to follow tagging conventions.", "created": "2018-07-23T12:27:00.000000"}, {"author": "5ad8bc5f56d6c72b4519625c", "body": "Mob session 24 July 2018, same participants:\n * Cleaned up spec tests for #check_migrating_pool_vms\n * Split #repopulate_pool_vms\n * Refactored signature of #clone_vm and #_clone_vm to pass pool_name, and refactored spec tests\n * Partial refactor on #repopulate_pool_vms spec tests\n\nTODO:\n * Finish cleaning up #repopulate_pool_vms spec tests\n * Discuss refactoring spec tests use of $config and possibly execute\n * Discuss refactoring bizarre iteration on pool_manager.rb:837 in #repopulate_pool_vms\n * Discuss refactoring #clone_vm to support batch cloning (make multiple clones in one call)\n * Rebase branch before merge", "created": "2018-07-24T12:45:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "I created a new PR for this so it is easier to tell if I made any mistakes during the rebase. I made a few minor fixes in the tests where there was a comment about references to a redis based lock. The other main change is with the docker-compose file, which is updated to work with vmpooler loaded as a gem.", "created": "2018-08-23T15:11:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "Although this says re-write pool_manager I think the focus was check_pool. I think this is done and further improvements should be separately scoped. 
Please add a comment or re-open if I've missed something or you disagree.", "created": "2018-08-23T16:32:00.000000"}], "components": ["VM Pooler"], "created": "2018-06-11T10:16:00.000000", "creator": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@70c11b9d"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hz1vaf:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "5.0"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "23/Jul/18"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_3203588491_*|*_10007_*:*_1_*:*_1994695024_*|*_3_*:*_1_*:*_1131522107_*|*_6_*:*_1_*:*_0"}], "description": "Pool manager is a significant portion of the vmpooler application, and where logic 
around managing and filling pools is stored. It has evolved over time and has become fairly sprawling, which can make it more difficult to determine what pool manager is actually trying to do. We should consider re-writing this to make it more clear, and perhaps improve its function, both for ourselves, and to make life easier for future contributors.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10106", "fixedVersions": [], "id": "10106", "issueType": "Improvement", "key": "POOLER-114", "labels": ["mob"], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "resolution": "Done", "resolutionDate": "2018-08-23T16:32:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Re-write pool_manager", "timeSpent": "PT0S", "updated": "2018-08-23T16:32:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "attachments": [], "comments": [{"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "This change has landed and been deployed. 
Service and human users can now both auth to vmpooler with a single config.", "created": "2018-06-28T15:51:00.000000"}], "components": ["VM Pooler"], "created": "2018-06-08T13:44:00.000000", "creator": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@119b894b"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hznit3:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "3.0"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_419892138_*|*_10007_*:*_1_*:*_195252453_*|*_10009_*:*_1_*:*_39548452_*|*_6_*:*_1_*:*_0_*|*_10006_*:*_1_*:*_1080941184"}], "description": "We should be able to specify multiple search DNs, currently referenced as the LDAP base in vmpooler configuration. For our use case we have ou=users and then ou=service,ou=users. 
We cannot generate a token using the token API without changing the OU for a service user, and then changing it back. We should be able to allow a service user and a human user to get a token from vmpooler with a single configuration.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10164", "fixedVersions": [], "id": "10164", "issueType": "Improvement", "key": "POOLER-113", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "resolution": "Done", "resolutionDate": "2018-06-28T15:51:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Add support for multiple LDAP search base entries", "timeSpent": "PT0S", "updated": "2018-06-28T15:51:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [{"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "I have put up a PR for this change. My solution is to implement a VM mutex that tracks VM operations across threads. 
I implemented this for checking when ready, pending, running, destroying, and migrating to ensure that these things do not happen in parallel, which is a waste of efforts, and produces confusing log messages.", "created": "2018-06-13T17:13:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "This change has been merged.", "created": "2018-06-20T15:07:00.000000"}], "components": ["VM Pooler"], "created": "2018-06-08T13:19:00.000000", "creator": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@7f0857cf"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hznirz:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "1.0"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": 
"1_*:*_1_*:*_421346280_*|*_10007_*:*_1_*:*_451131759_*|*_10009_*:*_1_*:*_146141929_*|*_6_*:*_1_*:*_0_*|*_10006_*:*_1_*:*_24614795"}], "description": "Vmpooler will attempt to destroy a VM multiple times from different threads at the same time. This creates a noisy log about failed destroy attempts, since only one will succeed, and wastes application resources. Vmpooler should be aware of a destroy operation in progress, perhaps by leveraging a mutex for the VM, to ensure that only one destroy attempt occurs.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10142", "fixedVersions": [], "id": "10142", "issueType": "Improvement", "key": "POOLER-112", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "resolution": "Done", "resolutionDate": "2018-06-20T15:07:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Ensure vmpooler only destroys a VM once", "timeSpent": "PT0S", "updated": "2018-06-20T15:07:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [{"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "I've put up a PR for review that adds this capability.", "created": "2018-06-08T13:10:00.000000"}], "components": ["VM Pooler"], "created": "2018-06-08T13:09:00.000000", "creator": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": 
"com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@66d5614c"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hznirb:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "1.0"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_9925_*|*_10009_*:*_1_*:*_421119030_*|*_5_*:*_1_*:*_0"}], "description": "Vmpooler should be able to set the redis port and password.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10043", "fixedVersions": [], "id": "10043", "issueType": "Improvement", "key": "POOLER-111", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "resolution": "Fixed", "resolutionDate": "2018-06-13T10:08:00.000000", "status": "Resolved", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", 
"summary": "Add capability to set redis port and password", "timeSpent": "PT0S", "updated": "2018-06-13T10:08:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [{"author": "557058:d494f5f9-7e70-4298-bf72-c8f9e7cca4db", "body": "One idea is the endpoint could take in a JSON, where each entry is:\n{code}\n<pool-name> : <pool-config>\n{code}\n\nThe API would validate the pool-config for each pool, failing if one pool's pool config. is invalid. Once validation passes, it would go ahead and update Redis with the pool config. information (if any changes were made) (The two endpoints in the PR do something similar to this, with a similar input JSON). It would return a 200 status, with a JSON response where the response body would be each of the input pools, and each entry would be:\n{code}\n<pool-name>: <result>\n{code}\n\nwhere the <result> could be one of the following:\n# \"unchanged\" if no changes were made\n# \"changed\", then an array of the config. 
entries that were changed\n\nA 404 would be returned for an error if a pool is not found, and a 400 would be returned if validation fails (if at least one pool's entry has invalid config).\n\nThis endpoint could be useful for creating new pools", "created": "2018-05-25T13:33:00.000000"}, {"author": "6220db96c4d0fe0069535219", "body": "Migrating issue to a GitHub project at https://github.com/orgs/puppetlabs/projects/64/views/1?pane=issue&itemId=34966810", "created": "2023-08-03T06:36:00.000000"}], "components": [], "created": "2018-05-25T13:29:00.000000", "creator": "557058:d494f5f9-7e70-4298-bf72-c8f9e7cca4db", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@2b497d0c"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzmc8v:"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "03/Aug/23"}, {"fieldName": "[CHART] Time in Status", "fieldType": 
"com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_163789669870_*|*_6_*:*_1_*:*_0"}], "description": "https://github.com/puppetlabs/vmpooler/pull/256 introduces two new endpoints, {{config/poolsize}} and {{config/pooltemplate}} that update the poolsize and pooltemplate for a given list of pools. The pool manager later checks the pool config. and syncs. up any entries that need to be sync.d (e.g. it swaps out an old template for a new one if the template changed).\n\nThis could be reduced to a single endpoint, something like `config/pool`. Work in this ticket would investigate what that endpoint might look like and if it would be useful.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10055", "fixedVersions": [], "id": "10055", "issueType": "New Feature", "key": "POOLER-110", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:d494f5f9-7e70-4298-bf72-c8f9e7cca4db", "resolution": "Incomplete", "resolutionDate": "2023-08-03T06:36:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "03/Aug/23 6:36 AM", "summary": "Investigate the use of a single endpoint for updating pool configuration", "timeSpent": "PT0S", "updated": "2023-08-03T06:36:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "attachments": [], "comments": [{"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "I've put up a PR that does this. To test it I've split vmpooler-provisioner-dev-2 into an api and manager instance and have both along with redis running as a group in marathon. 
This is helpful because it would allow us to scale pool_manager and our API instance separately from one another.", "created": "2018-07-11T18:11:00.000000"}], "components": ["VM Pooler"], "created": "2018-05-16T15:54:00.000000", "creator": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@626df25c"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzm25z:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "3.0"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_4760107216_*|*_10007_*:*_1_*:*_24401237_*|*_3_*:*_1_*:*_86467424_*|*_10009_*:*_1_*:*_408891805_*|*_6_*:*_1_*:*_0"}], "description": "Pool manager and the API do not have to be run in the same place, but they do both require the same configuration file. 
We should load the configuration file into redis from pool manager and let the API application access its configuration from redis.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10053", "fixedVersions": [], "id": "10053", "issueType": "Improvement", "key": "POOLER-109", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "resolution": "Fixed", "resolutionDate": "2018-07-16T18:32:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Load API configuration from redis", "timeSpent": "PT0S", "updated": "2018-07-16T18:32:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "attachments": [], "comments": [{"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "This is accomplished in\u00a0https://github.com/puppetlabs/vmpooler/pull/256.", "created": "2018-05-22T16:39:00.000000"}], "components": ["VM Pooler"], "created": "2018-05-16T15:45:00.000000", "creator": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@7181995"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "Epic Link", 
"fieldType": "com.pyxis.greenhopper.jira:gh-epic-link", "value": "POOLER-99"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzm25b:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "3.0"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_521635845_*|*_10009_*:*_1_*:*_2914203446_*|*_6_*:*_1_*:*_0"}], "description": "We should create template deltas automatically from the vmpooler application while running instead of via a separate script. 
It may be easiest to do this as a background process for each pool at startup time.", "epicLinkSummary": "All optimization and improvements tickets targeted for 2018", "estimate": "PT0S", "externalId": "10009", "fixedVersions": [], "id": "10009", "issueType": "Improvement", "key": "POOLER-108", "labels": [], "originalEstimate": "PT0S", "parent": "10083", "parentSummary": "All optimization and improvements tickets targeted for 2018", "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "resolution": "Done", "resolutionDate": "2018-06-25T10:09:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Automatically create template deltas for configured vmpooler pool templates", "timeSpent": "PT0S", "updated": "2018-06-25T10:09:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "attachments": [], "comments": [{"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "I've put my last few changes on this PR. I don't think any additional changes are required, but will allow for a little further review for necessary before pushing for this to get merged. 
I added a flag so the endpoints being enabled are optional.", "created": "2018-06-13T17:15:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "This change has been implemented, merged to master, and deployed to all dev and production vmpooler instances.", "created": "2018-06-20T15:05:00.000000"}], "components": ["VM Pooler"], "created": "2018-05-16T15:42:00.000000", "creator": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@711de181"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzm24v:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "3.0"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_521816575_*|*_10007_*:*_1_*:*_1141988696_*|*_10009_*:*_1_*:*_1357929381_*|*_6_*:*_1_*:*_0"}], "description": "We should 
accept some types of configuration changes via API for vmpooler. It should probably limit the set of users permitted to perform these actions.\n\nInitially, we should target pool size and template changes. Size changes will cause pools to be resized, up or down. Template changes refresh the pool and create a new template delta on the new template before repopulating a pool.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10176", "fixedVersions": [], "id": "10176", "issueType": "Improvement", "key": "POOLER-107", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "resolution": "Done", "resolutionDate": "2018-06-20T15:05:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Accept pool configuration changes via API", "timeSpent": "PT0S", "updated": "2018-06-20T15:05:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "attachments": [], "comments": [{"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "We probably will not do this.", "created": "2018-06-04T12:55:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "This probably doesn't make sense to prioritize or do for now.", "created": "2018-06-13T17:15:00.000000"}], "components": ["VM Pooler"], "created": "2018-05-16T15:39:00.000000", "creator": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": 
"com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@72b0cbb4"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzm24n:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "5.0"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_2424971934_*|*_6_*:*_1_*:*_0"}], "description": "Vmpooler instances are tracked via redis. If redis fails and loses machine state then all known VM state is lost, and any VMs found are considered discovered, then destroyed. If we were to mark a VM as checked out in its annotated data in vmware, along with its expiration time, we would be able to defer destroying the VM until the specified destroy time marked on the VM. 
As a part of this we would need to evaluate the performance impact of having to find the VM object and update this data as a background process after checking out the VM.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10146", "fixedVersions": [], "id": "10146", "issueType": "Improvement", "key": "POOLER-106", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "resolution": "Won't Do", "resolutionDate": "2018-06-13T17:15:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Discover running vmpooler instances", "timeSpent": "PT0S", "updated": "2018-06-13T17:15:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "attachments": [], "comments": [{"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "All configuration changes needed for this to happen have been completed. I will ensure documentation is created so folks feel prepared to support vmpooler running in marathon and so its differences are documented before proposing a time to switch over.", "created": "2018-05-22T17:02:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "Vmpooler runs in a container when deployed on marathon now. 
We will move production instances by next week.", "created": "2018-06-13T17:14:00.000000"}], "components": [], "created": "2018-05-16T15:33:00.000000", "creator": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@7fc228ac"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzm23z:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "1.0"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_522491339_*|*_3_*:*_1_*:*_1902723854_*|*_6_*:*_1_*:*_0"}], "description": "Vmpooler uses jruby 1.7.8. Upstream the application has dropped support for this in favor of jruby 9k. 
We should move to jruby 9k.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10050", "fixedVersions": [], "id": "10050", "issueType": "Improvement", "key": "POOLER-105", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "resolution": "Done", "resolutionDate": "2018-06-13T17:14:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Move to jruby 9k", "timeSpent": "PT0S", "updated": "2018-06-13T17:14:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "attachments": [], "comments": [{"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "All puppet vmpooler instances now run in docker on marathon.", "created": "2018-06-20T15:05:00.000000"}], "components": ["VM Pooler"], "created": "2018-05-16T14:49:00.000000", "creator": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@38515270"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": 
"com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzm22f:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "3.0"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_525190030_*|*_3_*:*_1_*:*_2499797145_*|*_6_*:*_1_*:*_0"}], "description": "We should run vmpooler in a container. It is able to run in docker on marathon and removes the need for a dedicated VM for the vmpooler application. This would remove the need for QENG-5509.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10085", "fixedVersions": [], "id": "10085", "issueType": "Improvement", "key": "POOLER-104", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "resolution": "Done", "resolutionDate": "2018-06-20T15:05:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Run vmpooler in a container", "timeSpent": "PT0S", "updated": "2018-06-20T15:05:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "attachments": [], "comments": [], "components": [], "created": "2018-04-19T13:40:00.000000", "creator": 
"557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@32f8a1c3"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Method Found", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzlf0f:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "1.0"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_587504_*|*_10009_*:*_1_*:*_2322913494_*|*_5_*:*_1_*:*_0"}], "description": "When introducing YAML.safe_load for configuration files passed in as an environment variable the method used for loading a configuration file from disk was changed. 
As a result, the configuration is no longer serialized and usable unless loaded as an environment variable.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10084", "fixedVersions": [], "id": "10084", "issueType": "Bug", "key": "POOLER-103", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "resolution": "Fixed", "resolutionDate": "2018-05-16T11:05:00.000000", "status": "Resolved", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Loading of configuration file fails since introducing safe_load", "timeSpent": "PT0S", "updated": "2018-05-16T15:52:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "attachments": [], "comments": [{"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "In QENG-6118 I describe adding a document, which I will also link here, which describes the steps to update a template as it stands today. In sprint planning [~accountid:557058:7306c811-399c-4964-b8cc-74e0ece239a1] suggested I send something out to platform team describing the changes. I plan to get review from a few folks before sending it out and will plan to send an update about this later this week, or early next week, pending documentation feedback.", "created": "2018-01-23T16:42:00.000000"}, {"author": "557058:7306c811-399c-4964-b8cc-74e0ece239a1", "body": "We talked about this in sprint planning, but updating here for tracking.\n\nI think QE, RE, and the Platform team lists would suffice for the announcement. 
", "created": "2018-01-24T11:57:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "I sent out an email to the discuss lists for QE, RE and platform team. There are 4 different platform lists so I think I got the correct one, but if I'm mistaken please let me know.", "created": "2018-02-07T12:27:00.000000"}], "components": [], "created": "2018-01-11T10:21:00.000000", "creator": "557058:7306c811-399c-4964-b8cc-74e0ece239a1", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@697f1cb1"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "Epic Link", "fieldType": "com.pyxis.greenhopper.jira:gh-epic-link", "value": "POOLER-99"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzj4n3:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "2.0"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": 
"23/Jan/18"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_8513_*|*_6_*:*_1_*:*_0_*|*_10006_*:*_1_*:*_2340386238"}], "description": "With VMs hosted in two data centers, we need to document what this change means to the images workflow for updates and additions. This change has caught folks off guard, who manage these images.", "epicLinkSummary": "All optimization and improvements tickets targeted for 2018", "estimate": "PT0S", "externalId": "10105", "fixedVersions": [], "id": "10105", "issueType": "Improvement", "key": "POOLER-102", "labels": [], "originalEstimate": "PT0S", "parent": "10083", "parentSummary": "All optimization and improvements tickets targeted for 2018", "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:7306c811-399c-4964-b8cc-74e0ece239a1", "resolution": "Done", "resolutionDate": "2018-02-07T12:27:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Document and announce changes to IMAGES workflow", "timeSpent": "PT0S", "updated": "2018-02-21T11:40:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "attachments": [], "comments": [{"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "I've got a PR up for this https://github.com/puppetlabs/vmpooler/pull/248.", "created": "2018-01-08T10:53:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "Merged.", "created": "2018-01-10T13:21:00.000000"}], "components": ["VM Pooler"], "created": "2018-01-08T10:52:00.000000", "creator": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": 
"com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@f3e3cd8"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Method Found", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hyl3pj:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "1.0"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_12703_*|*_10007_*:*_1_*:*_181705148_*|*_6_*:*_1_*:*_0"}], "description": "When a host returns nil for quickstats CPU usage it is transformed to 0 and returned as though the host has no utilization present.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10141", "fixedVersions": [], "id": "10141", "issueType": "Bug", "key": "POOLER-100", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": 
"POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "resolution": "Fixed", "resolutionDate": "2018-01-10T13:21:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "No quickstats causes host utilization be detected as 0", "timeSpent": "PT0S", "updated": "2018-01-10T13:21:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [], "components": ["VM Pooler"], "created": "2018-01-03T13:58:00.000000", "creator": "557058:7306c811-399c-4964-b8cc-74e0ece239a1", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Team/s", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiselect", "value": "Quality Engineering"}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@5e5f3128"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "Epic Color", "fieldType": "com.pyxis.greenhopper.jira:gh-epic-color", "value": "ghx-label-1"}, {"fieldName": "Epic Name", "fieldType": "com.pyxis.greenhopper.jira:gh-epic-label", "value": "VMpooler Optimizations 2018"}, {"fieldName": "Epic Status", "fieldType": "com.pyxis.greenhopper.jira:gh-epic-status", "value": "Done"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": 
"com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Issue color", "fieldType": "com.pyxis.greenhopper.jira:jsw-issue-color", "value": "dark_grey"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hziyh3:"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_6419_*|*_6_*:*_1_*:*_0_*|*_10015_*:*_1_*:*_147041921834"}], "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10083", "fixedVersions": [], "id": "10083", "issueType": "Epic", "key": "POOLER-99", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:7306c811-399c-4964-b8cc-74e0ece239a1", "resolution": "Done", "resolutionDate": "2022-09-01T11:57:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "All optimization and improvements tickets targeted for 2018", "timeSpent": "PT0S", "updated": "2022-09-01T11:57:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [], "components": ["VM Pooler"], "created": "2017-12-04T16:06:00.000000", "creator": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": 
"com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@54db4825"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzijgf:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "1.0"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_23912682971_*|*_5_*:*_1_*:*_0"}], "description": "When a host in a cluster that vmpooler is targeting drops out of the cluster for any reason the application fails to perform new clone operations and gets stuck trying to evaluate machines that are on that host. When this happens the application will repeatedly report that it has discovered machines, then attempts to destroy them, and reports it cannot because the host is disconnected. 
This ticket serves to track implementing more graceful handling of these failures so vmpooler can continue to repopulate pools instead of being hung up on the host failure.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10140", "fixedVersions": [], "id": "10140", "issueType": "Improvement", "key": "POOLER-98", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "resolution": "Won't Do", "resolutionDate": "2018-09-07T11:31:00.000000", "status": "Resolved", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Gracefully handle ESXi host failures", "timeSpent": "PT0S", "updated": "2018-09-07T11:31:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:9479147e-472e-492f-9d5f-2788ee2dd8d0", "attachments": [], "comments": [], "components": ["VM Pooler"], "created": "2017-11-20T16:07:00.000000", "creator": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@477a631e"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": 
"com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzd9rj:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "1.0"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_671267753_*|*_10007_*:*_1_*:*_14682718_*|*_3_*:*_1_*:*_2930002_*|*_5_*:*_1_*:*_0"}], "description": "When submitting a pull request to vmpooler the travis CI tests that run fail reporting that rake is not installed. I cannot reproduce this issue locally with the same bundler version described in .travis.yml. This needs to be fixed in order to allow tests to actually run and until it is will show that all tests fail in all cases.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10175", "fixedVersions": [], "id": "10175", "issueType": "Bug", "key": "POOLER-96", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "resolution": "Fixed", "resolutionDate": "2017-11-28T15:28:00.000000", "status": "Resolved", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Travis CI tests fail when setting up environment", "timeSpent": "PT0S", "updated": "2017-11-28T15:28:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": 
"557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "attachments": [], "comments": [{"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "This has already been fixed. I just verified I can delete a powered off VM and watch vmpooler destroy it.", "created": "2018-07-03T12:54:00.000000"}], "components": ["VM Pooler"], "created": "2017-11-13T13:33:00.000000", "creator": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@1c403164"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Method Found", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzi63z:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "1.0"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": 
"1_*:*_1_*:*_20038661845_*|*_3_*:*_1_*:*_256682_*|*_6_*:*_1_*:*_0"}], "description": "Powered off virtual machines cannot be destroyed by vmpooler, reporting an InvalidPowerState error. This can probably be resolved by updating the destroy_vm method in vsphere provider to check whether the VM is powered off before attempting to power it off.\n\n{quote}\n[2017-11-13 12:22:50] [!] [win-2008r2-x86_64] 'rrmod0jvt5ktos6' failed while destroying the VM with an error: InvalidPowerState: The attempted operation cannot be performed in the current state (Powered off).\n{quote}", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10070", "fixedVersions": [], "id": "10070", "issueType": "Bug", "key": "POOLER-95", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "resolution": "Fixed", "resolutionDate": "2018-07-03T12:54:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Powered off VMs cannot be destroyed", "timeSpent": "PT0S", "updated": "2018-07-03T12:54:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:9479147e-472e-492f-9d5f-2788ee2dd8d0", "attachments": [], "comments": [{"author": "557058:9479147e-472e-492f-9d5f-2788ee2dd8d0", "body": "I've added a 'view' query parameter that supports all the top level elements in the JSON object.\n\nto return only what ABS needs it should query {{/status?view=queue,pools}}\nit is backwards compatible, as everything is returned if the query parameter is not specified.\n", "created": "2017-10-19T16:14:00.000000"}], "components": [], "created": "2017-10-05T14:45:00.000000", "creator": 
"557058:cd25c829-41d1-41be-b9ec-df76c04b00c2", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@630c7c96"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "Epic Link", "fieldType": "com.pyxis.greenhopper.jira:gh-epic-link", "value": "POOLER-51"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzh97r:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "3.0"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "19/Oct/17"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_1212224166_*|*_10007_*:*_1_*:*_435629079_*|*_3_*:*_1_*:*_66124237_*|*_5_*:*_1_*:*_0_*|*_10004_*:*_1_*:*_18022552_*|*_10006_*:*_1_*:*_5143"}], "description": "We have had to make changes in ABS that will now query the vmpooler api /status page for each request: 
https://puppet.atlassian.net/browse/QENG-5507.  It would probably better to have a different endpoint for ABS to query in order to get just the information it requires rather then hitting the /status endpoint.  ABS needs to know the total count of hosts running (queue::running) and the total count of each platform available (poolX::*).", "epicLinkSummary": "All optimization and improvements tickets targeted for 2017", "estimate": "PT0S", "externalId": "10101", "fixedVersions": [], "id": "10101", "issueType": "Story", "key": "POOLER-93", "labels": [], "originalEstimate": "PT0S", "parent": "10119", "parentSummary": "All optimization and improvements tickets targeted for 2017", "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:cd25c829-41d1-41be-b9ec-df76c04b00c2", "resolution": "Fixed", "resolutionDate": "2017-10-25T15:52:00.000000", "status": "Resolved", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Create a new api endpoint that provides just what ABS needs", "timeSpent": "PT0S", "updated": "2017-10-25T15:52:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:9479147e-472e-492f-9d5f-2788ee2dd8d0", "attachments": [], "comments": [{"author": "557058:9479147e-472e-492f-9d5f-2788ee2dd8d0", "body": "Here is the example pool section. Note there can be one or no 'alias' key, and the value can be a string, a one item array or a multiple items array. The aliases can be:\n# a new name\n# the name of another defined pool, and only in that later case it becomes a 'super pool'. 
\n\nI suppose ABS could also support allocating for simple aliased pools (ie example 1 just another name for the same pool).\n\n{code}\n\"pools\": {\n    \"debian-7-i386\": {\n      \"ready\": 5,\n      \"running\": 0,\n      \"pending\": 0,\n      \"max\": 5,\n      \"alias\": [\n        \"debian-7-32\"\n      ]\n    },\n    \"debian-7-i386-stringalias\": {\n      \"ready\": 5,\n      \"running\": 0,\n      \"pending\": 0,\n      \"max\": 5,\n      \"alias\": \"debian-7-32-stringalias\"\n    },\n    \"debian-7-x86_64\": {\n      \"ready\": 5,\n      \"running\": 0,\n      \"pending\": 0,\n      \"max\": 5,\n      \"alias\": [\n        \"debian-7-64\",\n        \"debian-7-amd64\"\n      ]\n    },\n    \"debian-7-i386-noalias\": {\n      \"ready\": 5,\n      \"running\": 0,\n      \"pending\": 0,\n      \"max\": 5\n    },\n    \"debian-7-x86_64-alias-otherpool-extended\": {\n      \"ready\": 0,\n      \"running\": 0,\n      \"pending\": 5,\n      \"max\": 5,\n      \"alias\": [\n        \"debian-7-x86_64\"\n      ]\n    }\n  }\n{code}", "created": "2017-10-18T11:23:00.000000"}], "components": [], "created": "2017-10-03T14:43:00.000000", "creator": "557058:9479147e-472e-492f-9d5f-2788ee2dd8d0", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@47b363f1"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "Epic Link", "fieldType": "com.pyxis.greenhopper.jira:gh-epic-link", "value": "POOLER-51"}, {"fieldName": "People Involved", "fieldType": 
"com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzh7gf:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "3.0"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "10008_*:*_1_*:*_108242330_*|*_1_*:*_1_*:*_1279867544_*|*_10007_*:*_1_*:*_411528861_*|*_3_*:*_1_*:*_101475050_*|*_5_*:*_1_*:*_0_*|*_10006_*:*_1_*:*_3810146"}], "description": "Today when you request the status api (/api/v1/status) and look at the pools, there is no information if there is an alias set for one of them.\n\nExample:\n{code}\npools:\nredhat-7-x86_64:\nready\t10\nrunning\t0\npending\t0\nmax\t10\nredhat-7-x86_64-pix:\nready\t10\nrunning\t0\npending\t0\nmax\t10\n{code}\n\nWe need to have the mapping between pools and what they are also known as for example within each pool:\n\n{code}\npools:\nredhat-7-x86_64:\nalias: ['redhat-7-64']\nready\t10\nrunning\t0\npending\t0\nmax\t10\npools:\nredhat-8-x86_64:\nalias: ['redhat-8-64', 'newredhat']\nready\t10\nrunning\t0\npending\t0\nmax\t10\n{code}\n\nI'm open to having that information populated any other (better) way.\n\nThat way the ABS will be able to use this information when it queries the api to know what pool has any alias setup for it.\n", "epicLinkSummary": "All optimization and improvements tickets targeted for 2017", "estimate": "PT0S", "externalId": "10037", "fixedVersions": [], "id": "10037", "issueType": "Improvement", 
"key": "POOLER-92", "labels": [], "originalEstimate": "PT0S", "parent": "10119", "parentSummary": "All optimization and improvements tickets targeted for 2017", "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:9479147e-472e-492f-9d5f-2788ee2dd8d0", "resolution": "Fixed", "resolutionDate": "2017-10-25T15:52:00.000000", "status": "Resolved", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Need the alias information in the api /status page", "timeSpent": "PT0S", "updated": "2017-10-25T15:52:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [{"author": "557058:cd25c829-41d1-41be-b9ec-df76c04b00c2", "body": "[~accountid:557058:295d7a84-a09b-4348-8961-a1e1764c190e] I'm going through our backlog.  Is this still something we should do?", "created": "2018-09-07T11:30:00.000000"}, {"author": "557058:295d7a84-a09b-4348-8961-a1e1764c190e", "body": "[~accountid:557058:cd25c829-41d1-41be-b9ec-df76c04b00c2],\n\nThis would still be very useful as it would enable testing using bolt/winrm away from the cygwin shell that we are currently using.", "created": "2018-09-10T04:35:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "[~accountid:557058:295d7a84-a09b-4348-8961-a1e1764c190e], what port does winrm use? I'm thinking that we could offer support for a per-pool configuration value that would specify the transport port, which would default to 22. Then, for Windows pool that are configured to use winrm we would verify that we can open a socket to that port instead of 22. 
From the vmpooler perspective the change should be trivial.", "created": "2018-10-03T13:58:00.000000"}, {"author": "557058:295d7a84-a09b-4348-8961-a1e1764c190e", "body": "[~accountid:557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72] The two default ports are:\n* 5985 (HTTP)\n* 5986 (HTTPS)\n\nAt the moment only HTTP is enabled, but I have a ticket up to add HTTPS\n\nYou may need to establish that WinRM is actually active using a WinRM call rather than just simply checking if the port is open.", "created": "2018-10-03T15:06:00.000000"}, {"author": "6220db96c4d0fe0069535219", "body": "Migrating issue to a GitHub project at https://github.com/orgs/puppetlabs/projects/64/views/1?pane=issue&itemId=34966618", "created": "2023-08-03T06:13:00.000000"}], "components": [], "created": "2017-09-06T11:27:00.000000", "creator": "557058:295d7a84-a09b-4348-8961-a1e1764c190e", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@5fdd04d9"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Method Found", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "QA Risk Assessment", "fieldType": 
"com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzp84f:"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "07/Sep/18"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_186345979439_*|*_6_*:*_1_*:*_0"}], "description": "The {{vmpooler}} readiness check tests ssh (port 22) to verify that a new machine instance is ready. Code is in [vm_ready| https://github.com/glennsarti/vmpooler/blob/master/lib/vmpooler/providers/vsphere.rb#L269-L274] function.\n\nWe should check instead that {{WinRm}} connection is active (Port 5986 (https)).", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10008", "fixedVersions": [], "id": "10008", "issueType": "Bug", "key": "POOLER-91", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:295d7a84-a09b-4348-8961-a1e1764c190e", "resolution": "Incomplete", "resolutionDate": "2023-08-03T06:13:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "03/Aug/23 6:13 AM", "summary": "Allow Non-SSH (cygwinless) Windows vmpooler machines", "timeSpent": "PT0S", "updated": "2023-08-03T06:13:00.000000", "votes": "1", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [{"attacher": "557058:9479147e-472e-492f-9d5f-2788ee2dd8d0", "created": "2017-08-07T08:00:00.000000", "name": "vmpooler_issue.txt", "uri": "https://puppet.atlassian.net/rest/api/3/attachment/content/10005"}], "comments": [{"author": "557058:9479147e-472e-492f-9d5f-2788ee2dd8d0", "body": 
"I tried restarting the vmpooler service twice (it died the first time and was returning error as if not authenticated) but it is still bahaving the same way", "created": "2017-08-07T08:19:00.000000"}, {"author": "557058:9479147e-472e-492f-9d5f-2788ee2dd8d0", "body": "The issue wasn't in vcenter nor vmpooler.\n\nThe root cause was about 80 jobs relying on both centos-6 and a solaris version (10 or 11). The job would get processed by ABS queue processor, but create a starvation situation. Opening an ABS ticket for that.", "created": "2017-08-07T10:08:00.000000"}], "components": [], "created": "2017-08-07T08:01:00.000000", "creator": "557058:9479147e-472e-492f-9d5f-2788ee2dd8d0", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@6fd8c8f3"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Method Found", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": 
"0|hzfsav:"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_7575757_*|*_6_*:*_1_*:*_0"}], "description": "The system is in a state where jobs needing centos6 are queuing, and the VMpooler tries to clone them, but as they move through their normal callflow, suddenly get destroyed as soon as they are checked out to run.\n\n{code}\n[2017-08-07 06:47:17] [ ] [centos-6-x86_64] 'shmv80j2ivwytye' is running on opdx-e6-chassis8-1.ops.puppetlabs.net\n[2017-08-07 06:47:17] [ ] [centos-6-x86_64] 'or81epqcnzdfm45' is running on opdx-e6-chassis8-1.ops.puppetlabs.net\n[2017-08-07 06:47:18] [-] [centos-6-x86_64] 'or81epqcnzdfm45' destroyed in 1.26 seconds\n[2017-08-07 06:47:18] [-] [centos-6-x86_64] 'shmv80j2ivwytye' destroyed in 1.38 seconds\n{code}\n\nSee attached logs", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10163", "fixedVersions": [], "id": "10163", "issueType": "Bug", "key": "POOLER-90", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Major", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:9479147e-472e-492f-9d5f-2788ee2dd8d0", "resolution": "Fixed", "resolutionDate": "2017-08-07T10:08:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "VMpooler clones, moves to pending, ready, running but quickly destoys VM", "timeSpent": "PT0S", "updated": "2017-08-07T10:17:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [{"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "I've added a change to vmpooler that inspects host.configIssue to determine if any configuration issues are present. 
When quickstats are not available a configIssue is reported, but an alarm is not getting set, so this should catch this condition and ensure a host in this state is not considered a suitable target for clones or migrations.", "created": "2017-08-15T13:24:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "This is now detected and hosts aren't used when reporting configuration issues.", "created": "2017-08-22T10:40:00.000000"}], "components": ["VM Pooler"], "created": "2017-08-03T10:10:00.000000", "creator": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@f47a4b9"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "Epic Link", "fieldType": "com.pyxis.greenhopper.jira:gh-epic-link", "value": "POOLER-51"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Method Found", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": 
"0|hzfqb3:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "3.0"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_10721_*|*_10009_*:*_1_*:*_599995086_*|*_6_*:*_1_*:*_0_*|*_10006_*:*_1_*:*_1043398244"}], "description": "When vmpooler tries to determine where to schedule VMs it asks hosts their current utilization. In OPS-14466 the host stopped returning quickstats causing all VMs to be scheduled on a host that surpassed a max vCPU number due to too many VMs, and machines would no longer power on, or be automatically powered off causing them to be removed from the ready state. We should guard against 0 values being returned when inspecting host utilization. Alternately, or in addition to this, we should ensure quick stats are returning.", "epicLinkSummary": "All optimization and improvements tickets targeted for 2017", "estimate": "PT0S", "externalId": "10044", "fixedVersions": [], "id": "10044", "issueType": "Bug", "key": "POOLER-89", "labels": [], "originalEstimate": "PT0S", "parent": "10119", "parentSummary": "All optimization and improvements tickets targeted for 2017", "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "resolution": "Fixed", "resolutionDate": "2017-08-22T10:40:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Vmpooler least used host method does not account for quickstats being unavailable", "timeSpent": "PT0S", "updated": "2017-08-22T10:40:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [], "components": ["VM Pooler"], "created": 
"2017-07-14T11:55:00.000000", "creator": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@2010f0da"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Method Found", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzfca7:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "1.0"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_31732184132_*|*_6_*:*_1_*:*_0"}], "description": "When a provider connection is refused, for this case vSphere, pool_manager will still log that it's Starting to clone. 
This is running on commit {{7902769611d30821b9a0efded87b3c3bf86ca6d0}} for vmpooler.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10040", "fixedVersions": [], "id": "10040", "issueType": "Bug", "key": "POOLER-88", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "resolution": "Cannot Reproduce", "resolutionDate": "2018-07-16T18:25:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "vmpooler reports starting to clone when connection is not available", "timeSpent": "PT0S", "updated": "2018-07-16T18:25:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [{"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "Here's a copy of the log data: https://gist.github.com/mattkirby/d3a4cbdd2c562086fdbb9b5e512be9fe", "created": "2017-07-12T14:29:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "I believe this was fixed in vmpooler commit\u00a00b5abd9bd36fdb83c5e581c9216087e564039f79 [https://github.com/puppetlabs/vmpooler/commit/0b5abd9bd36fdb83c5e581c9216087e564039f79]\u00a0.", "created": "2018-04-18T17:59:00.000000"}], "components": ["VM Pooler"], "created": "2017-07-12T14:18:00.000000", "creator": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@7a39742a"}, 
{"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "Epic Link", "fieldType": "com.pyxis.greenhopper.jira:gh-epic-link", "value": "POOLER-99"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Method Found", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzfaof:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "1.0"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_24205259185_*|*_6_*:*_1_*:*_0"}], "description": "When encountering a stale entry in ready VMs pool manager reports the following and it cannot resolve the issue.\n\n{quote}\n[2017-07-12 13:16:43] [!] 
[win-2008-x86_64] 'gs8r7bexqw8dmzx' failed while checking a ready vm : no implicit conversion to rational from nil\n{quote}", "epicLinkSummary": "All optimization and improvements tickets targeted for 2018", "estimate": "PT0S", "externalId": "10100", "fixedVersions": [], "id": "10100", "issueType": "Bug", "key": "POOLER-87", "labels": [], "originalEstimate": "PT0S", "parent": "10083", "parentSummary": "All optimization and improvements tickets targeted for 2018", "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "resolution": "Fixed", "resolutionDate": "2018-04-18T17:59:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Fail while checking a ready VM: no implicit conversion to rational from nil", "timeSpent": "PT0S", "updated": "2018-04-18T17:59:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [], "components": [], "created": "2017-07-05T14:52:00.000000", "creator": "557058:9479147e-472e-492f-9d5f-2788ee2dd8d0", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@5d6581e6"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, 
{"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzf5bj:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "2.0"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_37053439087_*|*_5_*:*_1_*:*_0"}], "description": "If I have set the system to have X number of workers threads cloning pools, and I have some that are low they should get refilled in priority.\n\nFor example if a pool is empty it should get the top priority for the say 50% of X that clone pools.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10036", "fixedVersions": [], "id": "10036", "issueType": "Improvement", "key": "POOLER-86", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:9479147e-472e-492f-9d5f-2788ee2dd8d0", "resolution": "Won't Do", "resolutionDate": "2018-09-07T11:29:00.000000", "status": "Resolved", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Clone / refill pools that are empty (or low) in priority", "timeSpent": "PT0S", "updated": "2018-09-07T11:29:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [{"attacher": "557058:5d2971d3-3eb2-4f05-ab63-8a307eabd9c4", 
"created": "2017-06-26T09:37:00.000000", "name": "Screen Shot 2017-06-26 at 8.36.00 AM.png", "uri": "https://puppet.atlassian.net/rest/api/3/attachment/content/10004"}], "comments": [], "components": [], "created": "2017-06-26T09:37:00.000000", "creator": "557058:5d2971d3-3eb2-4f05-ab63-8a307eabd9c4", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@5d88dd97"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Method Found", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzey9b:"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_33295615370_*|*_6_*:*_1_*:*_0"}], "description": "Several pools in vmpooler-cinext are sitting at {{4/5}} VMs in the ready state. It never tries to fill that last slot. 
\n\nThis is resulting in jobs waiting in the jenkins queue for resources that will never be there.\n\nRefreshing the pool properly fills it to full.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10139", "fixedVersions": [], "id": "10139", "issueType": "Bug", "key": "POOLER-85", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:5d2971d3-3eb2-4f05-ab63-8a307eabd9c4", "resolution": "Fixed", "resolutionDate": "2018-07-16T18:24:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Pools not filling to full capacity", "timeSpent": "PT0S", "updated": "2018-07-16T18:24:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [{"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "This got updated and we use a more modern version now.", "created": "2018-04-18T18:25:00.000000"}], "components": [], "created": "2017-06-14T09:39:00.000000", "creator": "557058:9479147e-472e-492f-9d5f-2788ee2dd8d0", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@74a2a078"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": 
"com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzemvr:"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "19/Apr/18"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_26642748705_*|*_6_*:*_1_*:*_0"}], "description": "We are using rbvmomi version 1.6.0 and its from 2012. For comparison beaker uses 1.9 and rubygems shows this version information\n\n{code}\n    1.11.2 - April 28, 2017 (176 KB)\n\n    1.11.1 - April 26, 2017 (176 KB)\n\n    1.11.0 - April 05, 2017 (176 KB)\n\n    1.10.0 - March 10, 2017 (176 KB)\n\n    1.9.5 - March 02, 2017 (201 KB)\n\n    1.6.0 - September 19, 2012 (174 KB)\n{code}\n\nWe should update to the latest and fix any issue due to changes. 
It has been updated in line with ESXi versions so we should keep it in sync or use the latest.\n\nTrying to look on vcenter it seems that at the time of writing this ticket the version of the API was 5.5\n\n{code}\nhttps://vmware-vc2.ops.puppetlabs.net/sdk//vimServiceVersions.xml\n\n<namespaces version=\"1.0\">\n<namespace><name>urn:vim25</name>\n<version>5.5</version>\n<priorVersions><version>5.1</version><version>5.0</version><version>4.1</version><version>4.0</version><version>2.5u2</version><version>2.5</version></priorVersions>\n</namespace>\n<namespace><name>urn:vim2</name><version>2.0</version>\n</namespace></namespaces>\n{code}", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10138", "fixedVersions": [], "id": "10138", "issueType": "Task", "key": "POOLER-84", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:9479147e-472e-492f-9d5f-2788ee2dd8d0", "resolution": "Fixed", "resolutionDate": "2018-04-18T18:25:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Update rbvmomi", "timeSpent": "PT0S", "updated": "2018-04-18T18:25:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:9479147e-472e-492f-9d5f-2788ee2dd8d0", "attachments": [], "comments": [{"author": "557058:9479147e-472e-492f-9d5f-2788ee2dd8d0", "body": "Interestingly beaker has a slightly different way of finding folders for vsphere:\n\nhttps://github.com/puppetlabs/beaker/blob/master/lib/beaker/hypervisor/vsphere_helper.rb#L126-L134\n\n{code}\n  def find_folder(dc,foldername)\n    datacenter = @connection.serviceInstance.find_datacenter(dc)\n    base = datacenter.vmFolder.traverse(foldername)\n    if base != nil\n      
base\n    else\n      abort \"Failed to find folder #{foldername}\"\n    end\nend\n{code}", "created": "2017-06-07T07:44:00.000000"}, {"author": "557058:d266d245-5cba-4a99-842a-d1e061513459", "body": "I would strongly encourage us to make it so that vmpooler can be told an exact data center and an exact cluster within that data center as this will become more important as time goes on. ", "created": "2017-06-10T11:08:00.000000"}, {"author": "557058:6713b848-54a4-4f2e-9636-a24860d9c3f2", "body": "Started work on this\n\nhttps://github.com/puppetlabs/vmpooler/pull/223", "created": "2017-06-14T18:40:00.000000"}, {"author": "557058:9479147e-472e-492f-9d5f-2788ee2dd8d0", "body": "I've synced the code on vmpooler-dev and will test it as soon as possible but currently blocked by OPS-14272", "created": "2017-07-05T10:29:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "To add an update here the changes in vmpooler PR 223 got merged.", "created": "2017-07-06T11:20:00.000000"}], "components": ["VM Pooler"], "created": "2017-06-07T04:38:00.000000", "creator": "557058:9479147e-472e-492f-9d5f-2788ee2dd8d0", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@5adec992"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "Epic Link", "fieldType": "com.pyxis.greenhopper.jira:gh-epic-link", "value": "POOLER-51"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": 
"com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Method Found", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Manual Test"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hylntz:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "3.0"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "10/Jun/17"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_2440205818_*|*_10009_*:*_1_*:*_2849681389_*|*_5_*:*_1_*:*_0"}], "description": "I do not want to prescribe what the solution is but the use case is:\nWe have 2 datacenters, namely opdx2 and pix both with VMpooler 'templates' and their own datastores. 
Currently the vmpooler code seem to operate on the first datacenter it finds.\n\nI can suggest two different approaches:\n# We make vmpooler code independent of datacenter information eg traverse the [rootFolder|https://www.vmware.com/support/developer/vc-sdk/visdk41pubs/ApiReference/index.html] of vcenter to find templates and create VMs\n# We make vmpooler aware of datacenters and make it a new parameter per pool, with also a default value.\n\n\nFor example the incompatible code uses rbvmomi methods that return the first found datacenter (and other places):\nhttps://github.com/puppetlabs/vmpooler/blob/1fcb19bd7bedda0d930af743e43ae1ae5b79dd6f/lib/vmpooler/providers/vsphere.rb#L500-L501\n\nDuring my investigation I also noticed we are [using rbvmomi version 1.6.0|https://github.com/puppetlabs/puppetlabs-modules/blob/99d58083ee2a5563caf4d86108079737e754f88a/site/profile/manifests/vmpooler.pp#L37-L37] ! It's very old as I couldn't even find it in github but rubygems shows:\n{code}\n    1.11.2 - April 28, 2017 (176 KB)\n    1.11.1 - April 26, 2017 (176 KB)\n    1.11.0 - April 05, 2017 (176 KB)\n    1.10.0 - March 10, 2017 (176 KB)\n    1.9.5 - March 02, 2017 (201 KB)\n    1.6.0 - September 19, 2012 (174 KB)\n{code}\n\n", "epicLinkSummary": "All optimization and improvements tickets targeted for 2017", "estimate": "PT0S", "externalId": "10041", "fixedVersions": [], "id": "10041", "issueType": "Bug", "key": "POOLER-83", "labels": [], "originalEstimate": "PT0S", "parent": "10119", "parentSummary": "All optimization and improvements tickets targeted for 2017", "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:9479147e-472e-492f-9d5f-2788ee2dd8d0", "resolution": "Fixed", "resolutionDate": "2017-08-07T10:03:00.000000", "status": "Resolved", "statusCategory": "Done", 
"statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "[vmpooler] Support for vcenter with multiple datacenters (namely Pix LTS cluster)", "timeSpent": "PT0S", "updated": "2017-08-07T10:03:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [{"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "The change you're seeing looks to be a result of increasing memory on master platforms from 4GB to 6GB in QENG-4878. Vmpooler snapshots memory, in addition to quiescing the filesystem, both of which are very expensive snapshot operations. Add 50% memory, and I'd expect it could be taking as much longer. Looking at snapshot timings from vmpooler logs this looks to be the case.\n\nThe fact that these clones are themselves based on a linked clone likely exacerbates the problem.\n\nOn 4/16:\n{quote}\n[2017-04-16 22:24:15] [+] [snapshot_manager] 'p0w8or147rzz23t' snapshot created in 495.08 seconds\n[2017-04-16 22:24:35] [+] [snapshot_manager] 'cctj26qd6a6t544' snapshot created in 514.71 seconds\n[2017-04-16 22:25:18] [+] [snapshot_manager] 'mjz147ntmfp70wy' snapshot created in 557.86 seconds\n[2017-04-16 22:25:18] [+] [snapshot_manager] 'mane9qsxswzz3s2' snapshot created in 426.07 seconds\n[2017-04-16 22:25:50] [+] [snapshot_manager] 'w8seabtkypvub0w' snapshot created in 458.71 seconds\n[2017-04-16 22:26:31] [+] [snapshot_manager] 'm4akedgqgf9mjlp' snapshot created in 636.03 seconds\n[2017-04-16 22:26:31] [+] [snapshot_manager] 'cqpxctkqnvi8u2r' snapshot created in 498.99 seconds\n[2017-04-16 22:26:31] [+] [snapshot_manager] 'l5nrrjcatjq8fgp' snapshot created in 507.02 seconds\n[2017-04-16 22:27:05] [+] [snapshot_manager] 'd9n0z6h9at9q2gf' snapshot created in 541.00 seconds\n[2017-04-16 22:27:06] [+] [snapshot_manager] 'w6zx0p0has0i29f' snapshot created in 533.77 seconds\n[2017-04-16 22:27:06] [+] [snapshot_manager] 'h3xubmdmz28o6xl' snapshot created in 534.17 seconds\n[2017-04-16 22:27:06] [+] 
[snapshot_manager] 'vwiqdbe8cys8vg7' snapshot created in 580.40 seconds\n{quote}\n\nToday:\n{quote}\n[2017-04-28 10:23:16] [+] [snapshot_manager] 'wvotn2mbrz7kk9s' snapshot created in 381.73 seconds\n[2017-04-28 10:29:37] [+] [snapshot_manager] 'au5qqsei16bpchw' snapshot created in 381.51 seconds\n[2017-04-28 10:29:42] [+] [snapshot_manager] 'wio3t2iel61a2hb' snapshot created in 386.38 seconds\n[2017-04-28 10:30:17] [+] [snapshot_manager] 'tmzdhqvpfa8bivr' snapshot created in 421.41 seconds\n[2017-04-28 10:30:17] [+] [snapshot_manager] 'yjrqtiow9spb2ga' snapshot created in 421.42 seconds\n[2017-04-28 10:30:17] [+] [snapshot_manager] 'ebjkm44btbg8l6d' snapshot created in 421.44 seconds\n{quote}", "created": "2017-04-28T12:27:00.000000"}, {"author": "557058:a69e978d-6cea-49f9-9227-3a3ef8a776c0", "body": "Do you have similar timings for how long it is taking to restore snapshots?  Could you look at the logs from early this morning - 2am PST - when we were seeing those time out in our automation?", "created": "2017-04-28T12:35:00.000000"}, {"author": "557058:a69e978d-6cea-49f9-9227-3a3ef8a776c0", "body": "And... more importantly, how long it is sitting in the queue to be restored. Once the restore starts, it doesn't seem to be taking longer, it's just taking a longer time to start.  
I'm getting this data by measuring the time from my restore request to vmpooler to the time that the host goes offline to be restored.", "created": "2017-04-28T12:38:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "I don't think we have any data about how long these operations are sitting in the queue, other than what is inferred by the times reported in the logs.\nHere's everything from vmpooler.log about snapshots today.\n{quote}\n[2017-04-28 00:17:31] [ ] [snapshot_manager] 'plr71mkrq8bdes5' is being reverted to snapshot 'oxte6pgpnbrkzm3399d1bg34jwuydrk9'\n[2017-04-28 00:17:40] [ ] [snapshot_manager] 'pcnb4wuj7rnzb1d' is being reverted to snapshot 'j1qarc3gw5v9k8qul28xti4fosm8jjv6'\n[2017-04-28 00:17:40] [<] [snapshot_manager] 'plr71mkrq8bdes5' reverted to snapshot in 9.70 seconds\n[2017-04-28 00:17:53] [ ] [snapshot_manager] 'kwezozn1dmedmw2' is being reverted to snapshot 'pu2pshrli9b4km4ly656ad33955mzew6'\n[2017-04-28 00:17:53] [ ] [snapshot_manager] 'lyvbg876h3i25uv' is being reverted to snapshot 'sfu9k5mpnbf7a8pep0ytp48yi3aowzkt'\n[2017-04-28 00:17:53] [<] [snapshot_manager] 'pcnb4wuj7rnzb1d' reverted to snapshot in 12.40 seconds\n[2017-04-28 00:18:18] [<] [snapshot_manager] 'kwezozn1dmedmw2' reverted to snapshot in 25.45 seconds\n[2017-04-28 00:18:18] [<] [snapshot_manager] 'lyvbg876h3i25uv' reverted to snapshot in 25.45 seconds\n[2017-04-28 10:16:54] [ ] [snapshot_manager] 'wvotn2mbrz7kk9s' is being snapshotted\n[2017-04-28 10:23:16] [ ] [snapshot_manager] 'au5qqsei16bpchw' is being snapshotted\n[2017-04-28 10:23:16] [ ] [snapshot_manager] 'ebjkm44btbg8l6d' is being snapshotted\n[2017-04-28 10:23:16] [ ] [snapshot_manager] 'tmzdhqvpfa8bivr' is being snapshotted\n[2017-04-28 10:23:16] [ ] [snapshot_manager] 'wio3t2iel61a2hb' is being snapshotted\n[2017-04-28 10:23:16] [ ] [snapshot_manager] 'yjrqtiow9spb2ga' is being snapshotted\n[2017-04-28 10:23:16] [+] [snapshot_manager] 'wvotn2mbrz7kk9s' snapshot created in 381.73 
seconds\n[2017-04-28 10:29:37] [+] [snapshot_manager] 'au5qqsei16bpchw' snapshot created in 381.51 seconds\n[2017-04-28 10:29:42] [+] [snapshot_manager] 'wio3t2iel61a2hb' snapshot created in 386.38 seconds\n[2017-04-28 10:30:17] [+] [snapshot_manager] 'tmzdhqvpfa8bivr' snapshot created in 421.41 seconds\n[2017-04-28 10:30:17] [+] [snapshot_manager] 'yjrqtiow9spb2ga' snapshot created in 421.42 seconds\n[2017-04-28 10:30:17] [+] [snapshot_manager] 'ebjkm44btbg8l6d' snapshot created in 421.44 seconds\n{quote}", "created": "2017-04-28T12:47:00.000000"}, {"author": "557058:a69e978d-6cea-49f9-9227-3a3ef8a776c0", "body": "[~accountid:557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72] Interesting... thanks.  The snapshots appear to have ben reverted very quickly. I think this may actually have a different root cause.  My restore function waits for the nodes to lose connectivity, and then regain connectivity, however it waits a progressively longer amount of time to check for losing connectivity just because the Beaker function I am using works that way. I think it may be that the node is going offline and coming back online quickly enough that the function never even notices it go offline and just waits forever.\n\nThanks so much for digging up the logs! I will add some additional logging to my stuff and see if I can verify whether that is the cause.", "created": "2017-04-28T15:00:00.000000"}, {"author": "557058:7306c811-399c-4964-b8cc-74e0ece239a1", "body": "[~accountid:557058:a69e978d-6cea-49f9-9227-3a3ef8a776c0] is this still a CI Blocker for you?", "created": "2017-05-02T13:10:00.000000"}, {"author": "557058:a69e978d-6cea-49f9-9227-3a3ef8a776c0", "body": "No, it was indeed a different root cause.  
I've got a Beaker PR up to address that issue: https://puppet.atlassian.net/browse/BKR-1116", "created": "2017-05-02T14:20:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "Has this continued to be an issue?", "created": "2018-04-16T13:16:00.000000"}, {"author": "557058:a69e978d-6cea-49f9-9227-3a3ef8a776c0", "body": "No, this can be closed.", "created": "2018-04-16T14:48:00.000000"}], "components": ["VM Pooler"], "created": "2017-04-28T11:33:00.000000", "creator": "557058:a69e978d-6cea-49f9-9227-3a3ef8a776c0", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@437cd60b"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzdnmn:"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "28/Apr/17"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": 
"1_*:*_1_*:*_30547316406_*|*_6_*:*_1_*:*_0"}], "description": "This started earlier this week, on Wednesday.  It has persisted and is still going very slowly.  This is impacting the HA automation which has automated snapshotting and restoring snapshots to save time on getting into a good state.  It is necessary for the HA automation since once you promote a replica to a master, there is no simple way to get it back into a clean state without restoring a snapshot.\n\nWhat I was seeing previously is that the vast majority of the time, it was taking < 1 minute to begin the restore process.  What I am seeing now - not entirely consistently is that many requests are taking 10-20 minutes, and some in the HA automation are even timing out after 30 minutes, which is the timeout we have set for the automation.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10082", "fixedVersions": [], "id": "10082", "issueType": "Bug", "key": "POOLER-82", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Major", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:a69e978d-6cea-49f9-9227-3a3ef8a776c0", "resolution": "Cannot Reproduce", "resolutionDate": "2018-04-17T00:55:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "VMPooler snapshot/restore functionality is extremely slow", "timeSpent": "PT0S", "updated": "2018-04-17T00:55:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [{"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "I believe this was due to vmpooler instances looking to the same directories. 
Please correct me if I'm wrong and these issues are persisting.", "created": "2018-04-18T18:26:00.000000"}], "components": [], "created": "2017-03-03T12:07:00.000000", "creator": "557058:2d7665f0-9d98-407f-86d7-8c4ec6a18b20", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@fdc9aa2"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzcbg7:"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "19/Apr/18"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_35529583185_*|*_6_*:*_1_*:*_0"}], "description": "During builds of puppet-agent, hosts are losing their connection to vmpooler and the infrastructure. 
They are no longer listed under the pooler folders on vsphere, but vmpooler-cinext still thinks the hosts are checked out.\n\nVsphere also does not list an IP address for the host.\n\nm9m2e3bvuu3cy3l.delivery.puppetlabs.net is an example", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10145", "fixedVersions": [], "id": "10145", "issueType": "Bug", "key": "POOLER-80", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:2d7665f0-9d98-407f-86d7-8c4ec6a18b20", "resolution": "Fixed", "resolutionDate": "2018-04-18T18:26:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Hosts \"disappearing\" from network during agent build", "timeSpent": "PT0S", "updated": "2018-04-18T18:26:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "attachments": [], "comments": [{"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "I ran through the example listed under \"[Remove stale VMs from ready queue|https://confluence.puppetlabs.com/display/SRE/vmpooler#vmpooler-RemovestaleVMsfromreadyqueue] and removed several stale VMs. 234 stale VMs were found listed in ready queues among various vmpooler pools. 
The following list shows what was remaining.\n\n{quote}\nRemoving im45njt1uqzygku from amazon-201403-i386\nRemoving gngvfw8cxab0wh1 from amazon-201403-x86_64\nRemoving c7tzzrdba0mc30s from centos-4-i386\nRemoving li2wsmdnc026ttd from centos-4-x86_64\nRemoving vc232mjdlny2y87 from centos-4-x86_64\nRemoving n3r52l4grk0cz0x from centos-5-i386\nRemoving pstex1ccgeayk4f from centos-5-i386\nRemoving scrlafyiggl1qks from centos-5-i386\nRemoving v09r55u6i4a9il0 from centos-5-i386\nRemoving uvbd9hh67njnnfz from centos-5-x86_64\nRemoving kp269edtkg8vrhc from centos-5-x86_64\nRemoving w5s4evncryesy6y from centos-5-x86_64\nRemoving oa7xtg4derj5vka from centos-5-x86_64\nRemoving tfp8nfgo5e7mohl from centos-6-i386\nRemoving d5xkl33118v5b1z from centos-6-i386\nRemoving jdfdzvd5cg403zn from centos-6-i386\nRemoving c17iuuigvh34919 from cisco-wrlinux-5-x86_64\nRemoving eg6gqb3kvidjhxw from cisco-wrlinux-5-x86_64\nRemoving m0j82c6ku4lgmka from cisco-wrlinux-5-x86_64\nRemoving d3fzkxv5fqc04bs from cisco-wrlinux-7-x86_64\nRemoving a2hcdyhqpdp01w8 from cisco-wrlinux-7-x86_64\nRemoving go899blc56ajvrv from cisco-wrlinux-7-x86_64\nRemoving iuazfd3nivrhbgm from cisco-nxos-9k-x86_64\nRemoving edecat8c8d7ybi2 from cisco-nxos-9k-x86_64\nRemoving d7pzqiu8zsfdokx from cumulus-vx-25-x86_64\nRemoving bfjvyfx0zdw0tda from cumulus-vx-25-x86_64\nRemoving qm9qlt2us8jvdo0 from debian-6-i386\nRemoving hu7h2qewpr5prf1 from debian-6-x86_64\nRemoving m26i40w5dii7j69 from debian-6-x86_64\nRemoving i9lx9ci3p72o35m from debian-7-i386\nRemoving fnolyr5w5zsk9p0 from debian-7-i386\nRemoving b92x9ojvub8f9rz from debian-7-i386\nRemoving dap489zx7wt9e9j from debian-7-i386\nRemoving e8sb6ipfoo7vpqu from debian-7-i386\nRemoving j4jh28nhljr1rec from debian-8-i386\nRemoving c701w86mj6zg6bn from debian-8-i386\nRemoving hox6aicn5pii3ev from debian-8-i386\nRemoving vwf07csv99xqdsu from debian-8-i386\nRemoving lhi2t06pvhoc110 from debian-8-x86_64\nRemoving xy93fq1238ne8b0 from debian-8-x86_64\nRemoving xmsa9egb228v3ap 
from debian-9-i386\nRemoving bd9v5we2k6dhik1 from debian-9-i386\nRemoving ngtl3gmtd1iky4d from debian-9-i386\nRemoving hkizhi18za4lq89 from debian-9-x86_64\nRemoving kpfyolslvmpasas from debian-9-x86_64\nRemoving y3mrlbaiox8e9w9 from debian-9-x86_64\nRemoving iq2mzalmr8vkkuy from debian-9-x86_64\nRemoving oyt7drt4hr8r6x4 from debian-9-x86_64\nRemoving a39oiaz1nw7xibu from fedora-14-i386\nRemoving msdbtiao1vgwkn6 from fedora-14-i386\nRemoving sp136fmq0rrqppq from fedora-14-i386\nRemoving x6rp49hexn8557q from fedora-14-i386\nRemoving bk3xpfde95w18ju from fedora-21-i386\nRemoving up4xgpsi380c2gt from fedora-21-x86_64\nRemoving pwdpeu2rqu6glvy from fedora-22-i386\nRemoving ph5p18g0i8fi8ac from fedora-22-i386\nRemoving l7bkpq6v73mb876 from fedora-22-x86_64\nRemoving o7gkr5y17v6k9ft from fedora-22-x86_64\nRemoving uf9kx7d44ollwbi from fedora-23-i386\nRemoving qbsn9dm97fmjl3a from fedora-23-i386\nRemoving nfu13yc4mns3dfm from fedora-23-i386\nRemoving azzihowpc7sh3vd from fedora-23-i386\nRemoving et24fuqbq89k4wo from fedora-23-x86_64\nRemoving dcgbns3w5noccmx from fedora-23-x86_64\nRemoving okgt2fgzwtky4pl from fedora-23-x86_64\nRemoving nyxez9obzonjfgp from fedora-23-x86_64\nRemoving ibpmf28ybkl04er from fedora-24-i386\nRemoving hvlcts56q3xjeod from fedora-24-i386\nRemoving hfrf89d6ryzuguh from fedora-24-i386\nRemoving lt01a0ytauqv9so from fedora-24-i386\nRemoving vfpflo43c1hpi6f from fedora-25-i386\nRemoving v1viti1bqvh6fuq from fedora-25-i386\nRemoving d06rj2pvidcisd4 from fedora-25-i386\nRemoving vnlwz16w98w7r7l from fedora-25-x86_64\nRemoving ymon31k2admxagx from fedora-25-x86_64\nRemoving j3nfefodecmxte3 from fedora-25-x86_64\nRemoving qf5j0fzdlizw7wj from fedora-25-x86_64\nRemoving yodn4o3majdq2lf from fedora-25-x86_64\nRemoving n7rlid02vgs4emk from opensuse-11-x86_64\nRemoving l6e7d0d34po8c1x from opensuse-11-x86_64\nRemoving she8u43ptv1ldbr from oracle-5-i386\nRemoving qrlb0znhqkl71e1 from oracle-5-i386\nRemoving yl93fl7mudmg9u5 from oracle-5-i386\nRemoving 
mhsf1lkhvh9oo5j from oracle-5-i386\nRemoving b6voi7ffesfroji from oracle-5-x86_64\nRemoving vhelurzmqs4vimb from oracle-5-x86_64\nRemoving okymw27is4t6ppp from oracle-5-x86_64\nRemoving iou43cr2m1b3p14 from oracle-6-i386\nRemoving f046go8762f45t5 from oracle-6-i386\nRemoving oc2hyrf6gagoqvg from oracle-6-i386\nRemoving ybqy72wsx6vj3sj from oracle-6-x86_64\nRemoving os6cbxr1hvd9cnz from oracle-6-x86_64\nRemoving fuzz8x1byom8whr from oracle-6-x86_64\nRemoving nx110c5kmtm3lnj from oracle-6-x86_64\nRemoving qqbyq0q918it9ev from oracle-6-x86_64\nRemoving r2wopguttui2o2t from oracle-7-x86_64\nRemoving awthaakkzqsc1e0 from oracle-7-x86_64\nRemoving gy9godwgqnjgu5k from oracle-7-x86_64\nRemoving kxz99708yfn098j from oracle-7-x86_64\nRemoving a3hr4f0a9dv2nib from oracle-7-x86_64\nRemoving c7lr33sehr35ekz from osx-1011-x86_64\nRemoving tzbbabbvrlfpnt2 from redhat-4-i386\nRemoving gfpfz6v47a2kyhk from redhat-4-i386\nRemoving y6dmgwjvey8wa4f from redhat-4-i386\nRemoving v0dobwcfkouhxxa from redhat-4-x86_64\nRemoving u21ixtupmtqp8h2 from redhat-4-x86_64\nRemoving xrd4h8fdrb3il1c from redhat-5-i386\nRemoving a6j67okusgxrqfe from redhat-5-i386\nRemoving lpm2nfxa9e322yh from redhat-5-i386\nRemoving wdtghhltwthr7ac from redhat-5-i386\nRemoving uy9voln567p03ty from redhat-5-i386\nRemoving yevi50zhdu38657 from redhat-5-i386\nRemoving g5zvbob4on0l0py from redhat-5-x86_64\nRemoving o28n3tts3w9vbw0 from redhat-5-x86_64\nRemoving eeepckvt4ol0x37 from redhat-5-x86_64\nRemoving txbzlukvq6262jc from redhat-5-x86_64\nRemoving shnufyoqx0mqfwq from redhat-5-x86_64\nRemoving enzbwglfzh7yiy1 from redhat-6-i386\nRemoving ap87m26lzfpgfdc from redhat-6-i386\nRemoving mna3iu1zxpfjqvn from redhat-6-i386\nRemoving rs1m6avriwa1jw2 from redhat-6-i386\nRemoving rg34raxs2yi5pzp from redhat-6-i386\nRemoving ymi3xaur06epnyy from scientific-5-i386\nRemoving nm1urqvhzmwxqmx from scientific-5-i386\nRemoving cfuktijg92doyus from scientific-5-i386\nRemoving i75lmial1061s4s from scientific-5-x86_64\nRemoving 
fpdqyr5hzcfzhan from scientific-5-x86_64\nRemoving ilrup07gqvkfgwh from scientific-5-x86_64\nRemoving s6yft52hjt9w90g from scientific-5-x86_64\nRemoving p4kh1xk5pbzexfe from scientific-6-i386\nRemoving uutu8hi1gpk71o3 from scientific-6-i386\nRemoving glc8nsl8xsmyrqs from scientific-6-i386\nRemoving gas2t5zfxm4jzzv from scientific-6-x86_64\nRemoving v4wp6nmg2y75kv2 from scientific-6-x86_64\nRemoving i7kxh5po2yzdapz from scientific-6-x86_64\nRemoving sszzhwoouufsqdy from scientific-6-x86_64\nRemoving rsst94g8iesod85 from scientific-6-x86_64\nRemoving kz2tsc3cc2woocf from scientific-6-x86_64\nRemoving byskxjwet1kg7og from scientific-7-x86_64\nRemoving uzkcp1kljk60atb from scientific-7-x86_64\nRemoving smscmnvdjiopww5 from scientific-7-x86_64\nRemoving q7lzimeujap0rrk from scientific-7-x86_64\nRemoving uwp4e4iwf82ijbj from scientific-7-x86_64\nRemoving nwbyedtbsgn1fsd from sles-10-i386\nRemoving wu3t8md4nyip3ok from sles-10-i386\nRemoving q1l3rkb9ieccpef from sles-10-i386\nRemoving fy09z2fxak6i4x9 from sles-10-i386\nRemoving cbtgfikhtpxc6wv from sles-10-x86_64\nRemoving colid3n7we92inc from sles-10-x86_64\nRemoving wvd0fut47mk7vkb from sles-10-x86_64\nRemoving lrbssqm43rptq38 from sles-10-x86_64\nRemoving ma8dxspfzr94rxo from sles-10-x86_64\nRemoving twz1vjk7z1m0olk from sles-10-x86_64\nRemoving t7xm8bohinhrtqa from sles-11-i386\nRemoving j9lslx08eibtspt from sles-11-i386\nRemoving ul89fjdynxhag1k from sles-11-i386\nRemoving qy28ol14rhiaux4 from sles-11-i386\nRemoving qga4dtnu81l55pp from vro-6-x86_64\nRemoving wwxgi8mov6kkkqo from vro-6-x86_64\nRemoving lb9tilqepjjepp4 from vro-7-x86_64\nRemoving lz3xpx834dyhoah from vro-7-x86_64\nRemoving a80s1ql0xv7vg7e from vro-7-x86_64\nRemoving n4nzjt7hl8k59xz from solaris-10-u8-x86_64\nRemoving gyh7vc3i26f6orm from solaris-10-u8-x86_64\nRemoving tfye8nwf95egumy from solaris-10-u8-x86_64\nRemoving k47kob7sfmiwxpd from solaris-112-x86_64\nRemoving myk6ngbn1g8qsvg from solaris-112-x86_64\nRemoving hy3onfnd54y71k8 from 
solaris-112-x86_64\nRemoving yrjvqhc04h09jp0 from solaris-112-x86_64\nRemoving db9hb61f22w265q from solaris-112-x86_64\nRemoving bapi1iqiq6x4hmi from solaris-112-x86_64\nRemoving lyp1gq5zh9iw2c8 from ubuntu-1004-i386\nRemoving rc7pfrjfkznyksq from ubuntu-1004-x86_64\nRemoving pxb8a4w2827kddc from ubuntu-1004-x86_64\nRemoving sdgnvi5togskr3s from ubuntu-1004-x86_64\nRemoving o0douovjszx5yni from ubuntu-1004-x86_64\nRemoving u8vdw74p1hzy00j from ubuntu-1204-i386\nRemoving dyxjuntkjsdxd3f from ubuntu-1204-i386\nRemoving xcky8sngsg28r73 from ubuntu-1204-i386\nRemoving cc3st2wpojn7wrx from ubuntu-1204-i386\nRemoving p57ohlcc1y72s2e from ubuntu-1204-x86_64\nRemoving besx8qatkytxiwt from ubuntu-1404-i386\nRemoving gxas9j0hn57bqj2 from ubuntu-1404-i386\nRemoving l2irisxw7tmembg from ubuntu-1404-i386\nRemoving q7ie8w3bkwp65zu from ubuntu-1504-i386\nRemoving bwk07kz77pgw9kf from ubuntu-1504-i386\nRemoving f2glplhzwqccw6s from ubuntu-1504-i386\nRemoving of954abup1716fc from ubuntu-1504-i386\nRemoving pks6dmjuubp7d52 from ubuntu-1504-x86_64\nRemoving kqaiarhdysqtg29 from ubuntu-1504-x86_64\nRemoving ulh7ygxbs6rn1ly from ubuntu-1504-x86_64\nRemoving x9zd2ddjllm4xgp from ubuntu-1510-i386\nRemoving eixt5kxhr7f3rz6 from ubuntu-1510-i386\nRemoving ydhk3ijuo2o0y4j from ubuntu-1510-i386\nRemoving yz4227jrjtsgo54 from ubuntu-1510-x86_64\nRemoving h9un5v6z6d6xads from ubuntu-1510-x86_64\nRemoving mzfiixih5gy9ni3 from ubuntu-1510-x86_64\nRemoving qc2smpq4x1e4mc3 from ubuntu-1510-x86_64\nRemoving suh2ajmvrh5lm7a from ubuntu-1510-x86_64\nRemoving smplqdfdqc77cgn from ubuntu-1510-x86_64\nRemoving bvns0iufwxmbr7t from ubuntu-1510-x86_64\nRemoving w40eo5y575nfu03 from ubuntu-1510-x86_64\nRemoving av0h55qtyfnjhzc from ubuntu-1510-x86_64\nRemoving nb8nud0zup99ypf from ubuntu-1604-i386\nRemoving m14n3f7cqa4h47p from ubuntu-1610-i386\nRemoving svhmobt2khdg2xw from ubuntu-1610-i386\nRemoving y4zhuxz9f9rwqvv from ubuntu-1610-i386\nRemoving dck42oauty5jhvf from ubuntu-1610-i386\nRemoving 
wwlhzncdz1bm2d9 from ubuntu-1610-i386\nRemoving egdemaqxuzrjen4 from ubuntu-1610-x86_64\nRemoving inrihq2edztmyvd from win-2008-x86_64\nRemoving ideeqll98xnpv3l from win-2008-x86_64\nRemoving tz1ypow21yfdi8m from win-2008-x86_64\nRemoving guaps5hs0p5h8jk from win-2008-x86_64\nRemoving cqnfmd4uqabj1tu from win-2008-x86_64\nRemoving fqcmiq55vih5i8b from win-2008r2-x86_64\nRemoving felmbw9rk04j39f from win-2012-x86_64\nRemoving o0nf6xodkywkniz from win-2012-x86_64\nRemoving i9n334mz10loae0 from win-2012-x86_64\nRemoving gdtf1mpmdjonvt8 from win-2012-x86_64\nRemoving mb6y7tkkzpogm30 from win-2012-x86_64\nRemoving djyw4zsj4jxe9i9 from win-2012-x86_64\nRemoving uux0bwh58bbgdtt from win-2012-x86_64\nRemoving xlbum3jdooiuzzv from win-2012r2-ja-x86_64\nRemoving pvnzihzarvpfltj from win-2012r2-core-x86_64\nRemoving pvx7gdgl9et3pum from win-7-x86_64\nRemoving n83yh1kkhskjquc from win-7-x86_64\nRemoving qjikjomac5mggl6 from win-10-ent-i386\nRemoving eci9unullovco8k from win-10-ent-i386\nRemoving w44iwgc13lc751o from win-10-ent-i386\nRemoving gb467tjt8evpun0 from win-10-ent-i386\nRemoving vqmc9vgwrn2cuqu from win-10-ent-x86_64\nRemoving v5k4mg5c5fwi2g4 from win-10-ent-x86_64\nRemoving cptefq1m2br32ux from win-10-ent-x86_64\n{quote}", "created": "2017-02-27T15:30:00.000000"}], "components": ["VM Pooler"], "created": "2017-02-27T11:25:00.000000", "creator": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@63816255"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": 
"People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzc5d3:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "2.0"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_28507_*|*_6_*:*_1_*:*_0_*|*_10006_*:*_1_*:*_14703121"}], "description": "Numerous reports have come in today about VMs being handed out by vmpooler that cannot be reached. In all instances these VMs were destroyed on 2/13, but requests for them were made today, and the names were handed out at that time. 
This ticket serves to track walking through VMs in the ready state, validating they exist, and removing them if they do not.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10135", "fixedVersions": [], "id": "10135", "issueType": "Bug", "key": "POOLER-79", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Major", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "resolution": "Fixed", "resolutionDate": "2017-02-27T15:30:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Audit pool entries for stale VMs", "timeSpent": "PT0S", "updated": "2017-02-27T15:30:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "attachments": [], "comments": [{"author": "557058:7306c811-399c-4964-b8cc-74e0ece239a1", "body": "[~accountid:557058:74ced041-721a-48ec-853a-35c3cf9ebfa9] are you able to reproduce this outside of vmfloaty?", "created": "2017-02-27T10:29:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "The node mentioned here has the following information in vmpooler logs. It looks like it was destroyed on 2/13. When was this VM requested?\n\n{quote}\n[2017-02-13 09:44:37] [ ] [centos-7-x86_64] 'dxg8rgm95izt8cl' is being cloned from 'centos-7-x86_64'\n[2017-02-13 09:44:44] [+] [centos-7-x86_64] 'dxg8rgm95izt8cl' cloned from 'centos-7-x86_64' in 7.33 seconds\n[2017-02-13 09:46:37] [>] [centos-7-x86_64] 'dxg8rgm95izt8cl' moved to 'ready' queue\n[2017-02-13 12:31:24] [ ] [centos-7-x86_64] 'dxg8rgm95izt8cl' is being shut down\n[2017-02-13 12:31:26] [-] [centos-7-x86_64] 'dxg8rgm95izt8cl' destroyed in 2.12 seconds\n[2017-02-27 00:41:58] [!] 
[centos-7-x86_64] 'dxg8rgm95izt8cl' not found in inventory, removed from 'completed' queue\n{quote}", "created": "2017-02-27T10:50:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "[~accountid:557058:74ced041-721a-48ec-853a-35c3cf9ebfa9] reported in channel having requested this VM today. [~accountid:623c0cebbef8a60068c7977d] reported seeing this with beaker with a different machine as well. Something appears to have happened on 2/13 so I created POOLER-79 to clean up after whatever mess has been made.", "created": "2017-02-27T11:54:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "I resolved this issue via POOLER-79. See that ticket for more detail, but the gist is stale VMs were in the ready queue that have been removed.", "created": "2017-02-27T15:31:00.000000"}], "components": ["VM Pooler"], "created": "2017-02-26T07:08:00.000000", "creator": "557058:74ced041-721a-48ec-853a-35c3cf9ebfa9", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@69f2f852"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": 
"com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzc4y7:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "2.0"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "27/Feb/17"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_106226618_*|*_6_*:*_1_*:*_0_*|*_10006_*:*_1_*:*_10345289"}], "description": "Attempts to create pooler instances (via {{floaty}}) result in instances being created but without an ip assigned, its not possible to resolve the vms fqdn.\n\nHave checked for both {{centos-7-x86_64}} and {{ubuntu-1610-x86_64}} templates.\n\n{code}\n$ floaty get centos-7-x86_64\n{\"centos-7-x86_64\":\"dxg8rgm95izt8cl.delivery.puppetlabs.net\"}\n$ floaty query dxg8rgm95izt8cl.delivery.puppetlabs.net\n{\"ok\"=>true,\n \"dxg8rgm95izt8cl\"=>\n  {\"template\"=>\"centos-7-x86_64\",\n   \"lifetime\"=>12,\n   \"running\"=>0.01,\n   \"state\"=>\"running\",\n   \"ip\"=>\"\",\n   \"domain\"=>\"delivery.puppetlabs.net\"}}\n$ ping dxg8rgm95izt8cl.delivery.puppetlabs.net\nping: cannot resolve dxg8rgm95izt8cl.delivery.puppetlabs.net: Unknown host\n{code}\n\nMy {{~/.vmfloaty.yml}} is pointing at:\n{code}\nurl: 'https://vmpooler.delivery.puppetlabs.net/api/v1'\n{code}", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10038", "fixedVersions": [], "id": "10038", "issueType": "Bug", "key": "POOLER-78", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Blocker", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": 
"557058:74ced041-721a-48ec-853a-35c3cf9ebfa9", "resolution": "Fixed", "resolutionDate": "2017-02-27T15:31:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "pooler instances created without ips assigned", "timeSpent": "PT0S", "updated": "2017-02-27T15:31:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "623c1349bef8a60068c79b09", "attachments": [], "comments": [{"author": "557058:295d7a84-a09b-4348-8961-a1e1764c190e", "body": "Using parameterised Jenkins job sounds like a sensible approach to me.", "created": "2017-02-23T12:06:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "Are there specific maintenance needs that come to mind? I don't have anything specific to put forth here and would like to close this if we don't have something specific.", "created": "2018-04-18T17:54:00.000000"}, {"author": "557058:295d7a84-a09b-4348-8961-a1e1764c190e", "body": "[~accountid:557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72]\nThe main ones that come to mind are:\n# Refresh pool (after deploying new template)\n# Flush pool and stop cloning - the current way of discontinuing a pool is messy and this would help a lot.\n# Reread vmpooler.yaml - this would avoid having to restart if we are adding a new pool. However, its probably a nice and complex task as you also have to figure out what has changed.\n\n", "created": "2018-04-19T09:42:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "Some capabilities have been added via the configuration API. 
Suggestions are welcome if you would like to see more capabilities added in the future.", "created": "2018-07-16T18:28:00.000000"}], "components": ["VM Pooler"], "created": "2017-02-23T11:02:00.000000", "creator": "557058:3a3de6c6-95b1-47b3-a52a-6a6865ff9241", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@17e0ce7a"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "Epic Link", "fieldType": "com.pyxis.greenhopper.jira:gh-epic-link", "value": "POOLER-99"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzc1an:"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "23/Feb/17"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_505581_*|*_6_*:*_1_*:*_0_*|*_10005_*:*_1_*:*_43913870037"}], "description": "There are maintenance tasks that we commonly need to run on various vmpooler instances that would be more 
accessible to those who need them performed if we didn't require a vmpooler subject matter expert to do them. That role has been passed between various folks over the years and currently belongs to [~accountid:557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72].\n\nI suspect most of these maintenance tasks are usually series of command line interactions with the vmpooler instance's respective redis instance...I'm not sure what all the tasks are, but at least one that I have seen requested repeatedly in hipchat is to purge a particular pool.\n\nProbably the shortest path to automating this task would be to create a parameterized jenkins job that does the work. Using this approach has advantages other than simply making the task available to non vmpooler SMEs; it also allows us to keep a history of maintenance task successes and failures since all of our jenkins masters report build statistics to the Jenkins Run Historian database; additionally, it lets us report maintenance task results to the 'vmpooler' hipchat channel; and finally, it opens up the possibility to use kerminator to run maintenance tasks since kerminator has support for triggering jenkins jobs.\n\n/cc [~accountid:557058:7306c811-399c-4964-b8cc-74e0ece239a1] [~accountid:557058:295d7a84-a09b-4348-8961-a1e1764c190e] [~accountid:63d40628f6e1b543161789a7]", "epicLinkSummary": "All optimization and improvements tickets targeted for 2018", "estimate": "PT0S", "externalId": "10034", "fixedVersions": [], "id": "10034", "issueType": "New Feature", "key": "POOLER-77", "labels": [], "originalEstimate": "PT0S", "parent": "10083", "parentSummary": "All optimization and improvements tickets targeted for 2018", "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:3a3de6c6-95b1-47b3-a52a-6a6865ff9241", "resolution": "Fixed", 
"resolutionDate": "2018-07-16T18:28:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Automate common vmpooler administration tasks in a way that is accessible to vmpooler developers", "timeSpent": "PT0S", "updated": "2018-07-16T18:28:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [{"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "Yesterday, I accidentally upgraded ESXi compatibility version for the centos-7-x86_64 template, and the change resulted in the template no longer creating a bootable system. I used the last old template to restore. I believe this is probably part of the problem.\n\nI'm going to create an updated template that has its software updated and is using the current bootstrap script, which I hope will help with this. I'm starting that process now and hope to have an update by 1.", "created": "2017-02-22T12:56:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "After chatting with [~accountid:557058:74b44c9c-9f7d-4d53-b6b6-e598c1652c9b] I determined we should source the rebuild from the packer image. I'm working on that, and have a template, but I find that the vmtoolsd stuff is not returning hostname information, and therefore bootstrapping fails. I'm still attempting to determine why.", "created": "2017-02-22T14:59:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "I deployed a new template and have had some success with it, in terms of vmpooler interacting with it as expected. I see some failures where bootstrapping fails with an error that prevents sending the hostname due to assumptions around network interfaces. I'm not sure why this is different than the previous template, but it looks like it is. In some cases VMs do populate ddns and those move to the ready queue. 
I welcome any suggestions re: changing the network configuration to be more predictable, or tuning the bootstrap template to be more resilient to the changes.\n\nSince this image is based on the latest packer build instead of our old template I hope that this should address the issue with hosts disappearing from the network. I will await an update to find out whether that issue is resolved, and continue to work on the intermittent bootstrapping issue.", "created": "2017-02-22T16:14:00.000000"}, {"author": "557058:74b44c9c-9f7d-4d53-b6b6-e598c1652c9b", "body": "i'm not familar with the lifecyle of the current centos-7-x86_64 template, but i've submitted a [puppetlabs-packer PR|https://github.com/puppetlabs/puppetlabs-packer/pull/182] that should address some issues.  It updates the build logic so it matches the current pattern we're using for shipping Linux artifacts to vSphere for use with the vmpooler.\n\n* Use open-vm-tools instead of installing from the tools ISO.\n* Use vmxnet3 for vSphere artificats\n* vmpooler bootstrap and config file fixups due to networking changes\n* Switch to the AIO method for provisioning.\n\nI'm still waiting to get a build through jenkins-imaging, but a local build appears to behave as expected", "created": "2017-02-23T07:16:00.000000"}, {"author": "557058:74b44c9c-9f7d-4d53-b6b6-e598c1652c9b", "body": "[~accountid:557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72] i've shipped a new artifact to vsphere based on the PR above.\u2002\u2002it's packer/centos-7.2-x86_64-0.0.3 on vmware-vc2.  i ran a quick smoketest with beaker and it appears to behave as expected ( clones, bootstraps, and tests remote connectivity ).\u2002\u2002wng9tu3bs48tes2 ( in the dynamic/ directory on vmware-vc2 ) is the host it created if you want to poke around.  
i feel fairly confident that we can swap this template in for the one that's currently in production.", "created": "2017-02-23T10:19:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "I went ahead and moved the template I had deployed aside with a pooler-76 label on it, and deployed this one its place. New clones should be using this template. Thanks [~accountid:557058:74b44c9c-9f7d-4d53-b6b6-e598c1652c9b]!", "created": "2017-02-23T10:24:00.000000"}, {"author": "557058:3da95088-202d-4082-a15a-eb2e812f2578", "body": "[~accountid:557058:7306c811-399c-4964-b8cc-74e0ece239a1] will be conducting a post mortem for this issue", "created": "2017-02-23T13:19:00.000000"}, {"author": "557058:7306c811-399c-4964-b8cc-74e0ece239a1", "body": "QE is starting this post mortem. Follow along here: https://confluence.puppetlabs.com/display/SRE/Post-mortem%3A+centos-7-x86_64+vmpooler+template+broke+2017-02-22.", "created": "2017-02-24T09:13:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "This was fixed with deployment of a template based on the latest centos 7.2 packer build with the configuration changes described [here|https://github.com/puppetlabs/puppetlabs-packer/pull/182]. 
", "created": "2017-02-24T17:09:00.000000"}], "components": ["VM Pooler"], "created": "2017-02-22T12:54:00.000000", "creator": "557058:7ed56b44-735e-4cdb-b168-470ef48d5112", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@b6ae92c"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzc07z:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "2.0"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "22/Feb/17"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_111395_*|*_3_*:*_1_*:*_77237800_*|*_10009_*:*_1_*:*_110537682_*|*_6_*:*_1_*:*_0_*|*_10006_*:*_1_*:*_78517"}], "description": "As of this morning, [~accountid:70121:b72c16a2-594d-45f8-90bc-c6bc6c9510a8] and I have both had centos-7-x86_64 hosts suddenly disappear 
from the network after some amount of time (guess: 20m or so). DNS for the host no longer resolves, but we can query the state of the VM and the pooler states the system is still running. \n\nI logged into vSphere and was able to find my running VM and logged into the console. What I found was that the system no longer had an ipv4 address - there was only an ipv6 one. \n\nBranan did some further investigation on his system and found that the DHCP client had crashed. He was also unable to restart the network-mananger service. He traced this down to a package update to libgudev that *should* pull in updates to glib but apparently there is no package dependency. After updating glib, he was able to re-start the network-manager service again. However, he's not sure this completely resolves the issue, as it doesn't explain why the DHCP client crashed. \n\nFinally, [~accountid:557058:7306c811-399c-4964-b8cc-74e0ece239a1] noted that yesterday [~accountid:557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72] may have made a change to the centos-7-x86_64 vm template that involved rolling it back to a previous version. 
That could very well have a role in this as well.\n\nThese are all the details I know about so far - to anyone involved, please correct me if I'm misrepresenting anything.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10079", "fixedVersions": [], "id": "10079", "issueType": "Bug", "key": "POOLER-76", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Blocker", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:7ed56b44-735e-4cdb-b168-470ef48d5112", "resolution": "Fixed", "resolutionDate": "2017-02-24T17:06:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Seeing centos-7-x86_64 vmpooler hosts disappear from network", "timeSpent": "PT0S", "updated": "2017-02-27T12:12:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:6713b848-54a4-4f2e-9636-a24860d9c3f2", "attachments": [], "comments": [{"author": "557058:6713b848-54a4-4f2e-9636-a24860d9c3f2", "body": "Fixed finally in https://github.com/puppetlabs/vmpooler/commit/1fcb19bd7bedda0d930af743e43ae1ae5b79dd6f", "created": "2017-05-31T17:53:00.000000"}], "components": [], "created": "2017-02-04T21:20:00.000000", "creator": "557058:6713b848-54a4-4f2e-9636-a24860d9c3f2", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@4304579"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": 
"com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzbqyn:"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_10006383686_*|*_5_*:*_1_*:*_0"}], "description": "Currently there are very view unit tests which makes it extremely difficult to do any development work on VM Pooler.\n\nThis ticket will create unit tests so that the existing (not necessarily correct) behaviour of VM Pooler is documented and will assist in refactoring efforts in POOLER-70", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10077", "fixedVersions": [], "id": "10077", "issueType": "Task", "key": "POOLER-73", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:6713b848-54a4-4f2e-9636-a24860d9c3f2", "resolution": "Fixed", "resolutionDate": "2017-05-31T17:53:00.000000", "status": "Resolved", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Create unit tests to document current Pooler behaviour prior to  BackingServices refactor", "timeSpent": "PT0S", "updated": "2017-05-31T17:53:00.000000", 
"votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [{"author": "557058:fbe4227a-4ec7-455f-b2f1-063da2b851c1", "body": "[~accountid:557058:750ff3bd-7564-4d8a-b480-b500b85be583], reassigning to SRE.", "created": "2017-02-09T16:24:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "[~accountid:557058:5ce68c8f-5f7d-4a7e-8519-7c419bdf6365], were you ever able to get this working?", "created": "2017-04-17T14:59:00.000000"}, {"author": "557058:5ce68c8f-5f7d-4a7e-8519-7c419bdf6365", "body": "[~accountid:557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72] unfortunately not, I was able to sidestep it with help from the SC&R team i.e. someone else getting a token and passing it over to me.  Obviously not ideal but meant I was unblocked, nothing has changed to suggest this would now work so I never re-attempted, absolutely willing to if someone feels it should now be corrected.", "created": "2017-04-18T04:34:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "What instance are you trying to auth against? Are you using a client or interacting with the API directly?", "created": "2017-04-18T10:31:00.000000"}, {"author": "557058:5ce68c8f-5f7d-4a7e-8519-7c419bdf6365", "body": "sorry for the delay [~accountid:557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72], only trying to resolve this now.  
Using this command:\n\nfloaty token get --user nick.shaw --url https://vmpooler.mycompany.net/api/v1\n\nI'm prompted for my password, it seems to hang for a while and I get error: execution expired\n\nHere's the trace:\n\n\n{code:java}\n/Users/nick.shaw/.rvm/rubies/ruby-2.3.3/lib/ruby/2.3.0/net/http.rb:880:in `initialize': execution expired (Faraday::ConnectionFailed)\n\tfrom /Users/nick.shaw/.rvm/rubies/ruby-2.3.3/lib/ruby/2.3.0/net/http.rb:880:in `open'\n\tfrom /Users/nick.shaw/.rvm/rubies/ruby-2.3.3/lib/ruby/2.3.0/net/http.rb:880:in `block in connect'\n\tfrom /Users/nick.shaw/.rvm/rubies/ruby-2.3.3/lib/ruby/2.3.0/timeout.rb:101:in `timeout'\n\tfrom /Users/nick.shaw/.rvm/rubies/ruby-2.3.3/lib/ruby/2.3.0/net/http.rb:878:in `connect'\n\tfrom /Users/nick.shaw/.rvm/rubies/ruby-2.3.3/lib/ruby/2.3.0/net/http.rb:863:in `do_start'\n\tfrom /Users/nick.shaw/.rvm/rubies/ruby-2.3.3/lib/ruby/2.3.0/net/http.rb:852:in `start'\n\tfrom /Users/nick.shaw/.rvm/rubies/ruby-2.3.3/lib/ruby/2.3.0/net/http.rb:1398:in `request'\n\tfrom /Users/nick.shaw/.rvm/gems/ruby-2.3.3/gems/faraday-0.12.2/lib/faraday/adapter/net_http.rb:80:in `perform_request'\n\tfrom /Users/nick.shaw/.rvm/gems/ruby-2.3.3/gems/faraday-0.12.2/lib/faraday/adapter/net_http.rb:38:in `block in call'\n\tfrom /Users/nick.shaw/.rvm/gems/ruby-2.3.3/gems/faraday-0.12.2/lib/faraday/adapter/net_http.rb:85:in `with_net_http_connection'\n\tfrom /Users/nick.shaw/.rvm/gems/ruby-2.3.3/gems/faraday-0.12.2/lib/faraday/adapter/net_http.rb:33:in `call'\n\tfrom /Users/nick.shaw/.rvm/gems/ruby-2.3.3/gems/faraday-0.12.2/lib/faraday/request/authorization.rb:37:in `call'\n\tfrom /Users/nick.shaw/.rvm/gems/ruby-2.3.3/gems/faraday-0.12.2/lib/faraday/request/url_encoded.rb:15:in `call'\n\tfrom /Users/nick.shaw/.rvm/gems/ruby-2.3.3/gems/faraday-0.12.2/lib/faraday/rack_builder.rb:141:in `build_response'\n\tfrom /Users/nick.shaw/.rvm/gems/ruby-2.3.3/gems/faraday-0.12.2/lib/faraday/connection.rb:386:in `run_request'\n\tfrom 
/Users/nick.shaw/.rvm/gems/ruby-2.3.3/gems/faraday-0.12.2/lib/faraday/connection.rb:186:in `post'\n\tfrom /Users/nick.shaw/.rvm/gems/ruby-2.3.3/gems/vmfloaty-0.7.8/lib/vmfloaty/auth.rb:10:in `get_token'\n\tfrom /Users/nick.shaw/.rvm/gems/ruby-2.3.3/gems/vmfloaty-0.7.8/lib/vmfloaty.rb:484:in `block (2 levels) in run'\n\tfrom /Users/nick.shaw/.rvm/gems/ruby-2.3.3/gems/commander-4.3.8/lib/commander/command.rb:178:in `call'\n\tfrom /Users/nick.shaw/.rvm/gems/ruby-2.3.3/gems/commander-4.3.8/lib/commander/command.rb:153:in `run'\n\tfrom /Users/nick.shaw/.rvm/gems/ruby-2.3.3/gems/commander-4.3.8/lib/commander/runner.rb:428:in `run_active_command'\n\tfrom /Users/nick.shaw/.rvm/gems/ruby-2.3.3/gems/commander-4.3.8/lib/commander/runner.rb:68:in `run!'\n\tfrom /Users/nick.shaw/.rvm/gems/ruby-2.3.3/gems/commander-4.3.8/lib/commander/delegates.rb:15:in `run!'\n\tfrom /Users/nick.shaw/.rvm/gems/ruby-2.3.3/gems/vmfloaty-0.7.8/lib/vmfloaty.rb:565:in `run'\n\tfrom /Users/nick.shaw/.rvm/gems/ruby-2.3.3/gems/vmfloaty-0.7.8/bin/floaty:7:in `<top (required)>'\n\tfrom /Users/nick.shaw/.rvm/gems/ruby-2.3.3/bin/floaty:22:in `load'\n\tfrom /Users/nick.shaw/.rvm/gems/ruby-2.3.3/bin/floaty:22:in `<main>'\n\tfrom /Users/nick.shaw/.rvm/gems/ruby-2.3.3/bin/ruby_executable_hooks:15:in `eval'\n\tfrom /Users/nick.shaw/.rvm/gems/ruby-2.3.3/bin/ruby_executable_hooks:15:in `<main>'\n{code}\n", "created": "2017-08-03T06:55:00.000000"}], "components": [], "created": "2017-02-03T09:11:00.000000", "creator": "557058:5ce68c8f-5f7d-4a7e-8519-7c419bdf6365", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@5201391e"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, 
{"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hym4cv:"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "09/Feb/17"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_20984443986_*|*_6_*:*_1_*:*_0"}], "description": "As mentioned, attempting to get a VMPooler token and after entering my password I get:\n\nHTTP 401: There was a problem requesting a token:\n{\"ok\"=>false}\n\nusername is nick.shaw\n\nAs I understand it this should work but I'm a new employee so perhaps I've missed an access request I need to raise?", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10066", "fixedVersions": [], "id": "10066", "issueType": "Bug", "key": "POOLER-75", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Major", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:5ce68c8f-5f7d-4a7e-8519-7c419bdf6365", "resolution": "Fixed", "resolutionDate": "2017-10-04T07:12:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": 
"11/Mar/23 9:10 AM", "summary": "Unauthorised response when attempting to get token from VMPooler", "timeSpent": "PT0S", "updated": "2017-10-04T07:12:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:6713b848-54a4-4f2e-9636-a24860d9c3f2", "attachments": [], "comments": [{"author": "557058:6713b848-54a4-4f2e-9636-a24860d9c3f2", "body": "https://github.com/puppetlabs/puppetlabs-modules/pull/7162", "created": "2017-05-31T17:51:00.000000"}, {"author": "557058:6713b848-54a4-4f2e-9636-a24860d9c3f2", "body": "Resolved in https://github.com/puppetlabs/vmpooler/commit/1fcb19bd7bedda0d930af743e43ae1ae5b79dd6f\n", "created": "2017-05-31T17:52:00.000000"}], "components": [], "created": "2017-01-26T16:01:00.000000", "creator": "557058:6713b848-54a4-4f2e-9636-a24860d9c3f2", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Acceptance Criteria", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:textarea", "value": "- Additional spec tests for the Dummy backing service"}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@73e76f47"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": 
"com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzbjlr:"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_10803037373_*|*_5_*:*_1_*:*_0"}], "description": "It is difficult to do development on VM Pooler as it requires a VSphere environment for VM creation etc.\n\nThis improvement will builds on the new per-pool backing service (POOLER-70) to implement a Dummy backing service which behaves like VSphere but will just keep a VM registry in memory.\n\nIdeally we should be able to inject failure into the system to see how VM Pooler behaves", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10030", "fixedVersions": [], "id": "10030", "issueType": "Improvement", "key": "POOLER-72", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:6713b848-54a4-4f2e-9636-a24860d9c3f2", "resolution": "Fixed", "resolutionDate": "2017-05-31T17:52:00.000000", "status": "Resolved", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Create Dummy VM Backing service", "timeSpent": "PT0S", "updated": "2017-05-31T17:52:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:6713b848-54a4-4f2e-9636-a24860d9c3f2", "attachments": [], "comments": [{"author": "557058:6713b848-54a4-4f2e-9636-a24860d9c3f2", "body": "PR Raised \nhttps://github.com/puppetlabs/vmpooler/pull/180", "created": "2017-02-04T21:16:00.000000"}], "components": [], "created": "2017-01-26T15:58:00.000000", "creator": "557058:6713b848-54a4-4f2e-9636-a24860d9c3f2", "customFieldValues": 
[{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@27cdd9f3"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzbjlb:"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_3219692110_*|*_6_*:*_1_*:*_0"}], "description": "It is difficult to do local development as VMPooler requires an LDAP service for authentication.  This improvement will add a dummy authentication provider.\n\nFor testing purposes it is good to have a passing and failing scenario.  
In this case the dummy provider should pass if the username and password are different, and fail if the username and password are the same.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10099", "fixedVersions": [], "id": "10099", "issueType": "Improvement", "key": "POOLER-71", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:6713b848-54a4-4f2e-9636-a24860d9c3f2", "resolution": "Fixed", "resolutionDate": "2017-03-04T22:20:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Create a Dummy Authentication Provider", "timeSpent": "PT0S", "updated": "2017-03-04T22:20:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:6713b848-54a4-4f2e-9636-a24860d9c3f2", "attachments": [], "comments": [{"author": "557058:6713b848-54a4-4f2e-9636-a24860d9c3f2", "body": "https://github.com/puppetlabs/vmpooler/pull/189", "created": "2017-03-02T16:44:00.000000"}, {"author": "557058:6713b848-54a4-4f2e-9636-a24860d9c3f2", "body": "I thought I would document my plans for what and how to achieve the refactoring:\n\n* The refactor would move the vSphere specific code out of the pool_manager into a VM Provider class\n\n* The refactor would initially retain the existing behavior of one instance of the vSphere provider per pool.  Ultimately the goal is to have only one VM Provider instance for all pools, but having this intermediate step makes it easier to transition such a large code change.  \n\n* The refactor would initially just reference the code in the VSphere helper class and then slowly move the code from the helper into the VM Provider proper.  
This is intermediate step is used to make it easier transition such a large code change.\n\n* This refactor should not block any work on improving the vSphere helper or Pool Manager itself.  Indeed it is expected that as part of the refactor errors in the logic in both the helper and pool manager will be uncovered and fixed in other issue tickets and PRs.\n\n* Ideally this refactor should make as few behaviour changes to the Pool Manager as possible.  It is hoped that the only code changes will be due to making the code easier to test.", "created": "2017-03-04T22:09:00.000000"}, {"author": "557058:6713b848-54a4-4f2e-9636-a24860d9c3f2", "body": "Commited into master at", "created": "2017-05-31T17:51:00.000000"}], "components": [], "created": "2017-01-26T15:56:00.000000", "creator": "557058:6713b848-54a4-4f2e-9636-a24860d9c3f2", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Acceptance Criteria", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:textarea", "value": "- Existing spec tests pass\n- Create spec tests for the backing service\n- No functional changes to the API, and ideally, the configuration file."}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@865c953"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": 
"com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzbjiv:"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_3026843089_*|*_10007_*:*_1_*:*_35512376600_*|*_3_*:*_1_*:*_7776394247_*|*_6_*:*_1_*:*_0"}], "description": "VMPooler is tightly integrated into the VSphere APIs.  This ticket will be to untangle the VSphere dependencies and move this logic into separate classes i.e. Per-Pool VM Backing Services.\n\nWhereby each Pool can be backed by a different service be it, VSphere, OpenStack or a Dummy service for testing.\n\nThis ticket is only concerned with refactoring the existing VSphere code into a VSphere backing service.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10134", "fixedVersions": [], "id": "10134", "issueType": "Task", "key": "POOLER-70", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:6713b848-54a4-4f2e-9636-a24860d9c3f2", "resolution": "Done", "resolutionDate": "2018-07-16T18:23:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Refactor VM allocation to Per pool backing services", "timeSpent": "PT0S", "updated": "2018-07-16T18:23:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [{"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "If this is effectively managed in the application do you 
still think it's necessary to support deleting a ready VM from the API?", "created": "2018-04-18T17:18:00.000000"}, {"author": "557058:91233464-4152-4228-81dd-172d43a52a03", "body": "Sorry I didn't understand the \"effectively managed\" part? As in we should never get into that state?", "created": "2018-05-16T00:27:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "Yes, sorry for the lack of clarification. By \"effectively managed\" I mean to say a VM that is ready, but not reachable, should be identified and resolved automatically by vmpooler so a user should never have to attempt to delete a machine that is in the ready state, but not reachable.\n\nIt is now possible for a user to change a pool size, so a user could flush a pool without having to delete specific VMs. I expect that this should not be necessary though, so please do report it if you see issues like this occurring still.", "created": "2018-07-16T18:31:00.000000"}], "components": [], "created": "2017-01-19T12:18:00.000000", "creator": "557058:91233464-4152-4228-81dd-172d43a52a03", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@a2bf6c3"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "Epic Link", "fieldType": "com.pyxis.greenhopper.jira:gh-epic-link", "value": "POOLER-99"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": 
"com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzbd6v:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "1.0"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "18/Apr/18"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_52009387098_*|*_5_*:*_1_*:*_0"}], "description": "Using the /vm/:hostname API, you can only delete VMs that have been checked out, but not VMs that are in the ready queue.\n\n{code:ruby}\n        if backend.srem('vmpooler__running__' + rdata['template'], params[:hostname])\n          backend.sadd('vmpooler__completed__' + rdata['template'], params[:hostname])\n\n          status 200\n          result['ok'] = true\n        end\n{code}\n\nThis is a problem when one of the VMs in the ready queue is dead, see POOLER-64, and you don't want to delete all of the vms in the pool (by checking them out) or restarting the pooler, which requires a maintenance window.\n\n", "epicLinkSummary": "All optimization and improvements tickets targeted for 2018", "estimate": "PT0S", "externalId": "10063", "fixedVersions": [], "id": "10063", "issueType": "Bug", "key": "POOLER-69", "labels": [], "originalEstimate": "PT0S", "parent": "10083", "parentSummary": "All optimization and improvements tickets targeted for 2018", "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler 
(Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:91233464-4152-4228-81dd-172d43a52a03", "resolution": "Won't Do", "resolutionDate": "2018-09-13T12:21:00.000000", "status": "Resolved", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Can't delete a ready vm", "timeSpent": "PT0S", "updated": "2018-09-13T12:21:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "attachments": [], "comments": [{"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "Investigating this further I'm not convinced we need an intensive method like find_vm_heavy. There is a search index available of inventory path items and we have all data necessary to locate VMs by inventory path. My preliminary testing shows that it's about 9 times faster to simply retrieve the object by its inventory path rather than look for it by DNS name. I'm going to proceed with replacing this and put up a PR for the change. It should be much easier on the appliance to find objects this way.", "created": "2018-04-16T17:31:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "I've put up a pull request that replaces find_vm_heavy and rips out the propertyCollector usage. I don't think it's needed at all and is way heavier on the appliance than anything we need to do as a part of managing the lifecycle of VMs in vmpooler. If there's something I've missed here please let me know.", "created": "2018-04-17T11:57:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "There is still some usage of propertyCollector in the disk related operations. 
I will need to dig into these and remove them as well.", "created": "2018-05-22T16:41:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "I opened [https://github.com/puppetlabs/vmpooler/pull/260]\u00a0to remove the remaining instance of propertyCollector in the add_disk operations.", "created": "2018-06-04T13:02:00.000000"}], "components": [], "created": "2017-01-18T16:45:00.000000", "creator": "557058:91233464-4152-4228-81dd-172d43a52a03", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@45f3ebdb"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzbc4f:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "1.0"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "16/Apr/18"}, {"fieldName": "[CHART] Time in Status", "fieldType": 
"com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_39138248974_*|*_3_*:*_1_*:*_66466146_*|*_10009_*:*_1_*:*_3708508468_*|*_5_*:*_1_*:*_0"}], "description": "The Pooler method [{{find_vm_heavy}}|https://github.com/puppetlabs/vmpooler/blob/b7c370f064587eb7d8ffcedced045671ee6b8e5e/lib/vmpooler/vsphere_helper.rb#L314-L319] uses the vsphere {{PropertyCollector}} API to lookup a VM:\n\n{code:ruby}\n@connection = RbVmomi::VIM.connect(...)\n...\npropertyCollector = @connection.propertyCollector\n{code}\n\nThe {{find_vm_heavy}} method can be called by each of the per-pool threads, however, the vsphere documentation says that the PropertyCollector isn't thread-safe:\n\n{quote}\nA vSphere server creates a default PropertyCollector for every session, and allows you to create multiple, additional PropertyCollector objects. Create additional PropertyCollector objects, using one per thread, to perform mutually independent collection operations.\n{quote}", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10162", "fixedVersions": [], "id": "10162", "issueType": "Bug", "key": "POOLER-68", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:91233464-4152-4228-81dd-172d43a52a03", "resolution": "Fixed", "resolutionDate": "2018-05-30T10:05:00.000000", "status": "Resolved", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Pooler's PropertyCollector use is not thread-safe", "timeSpent": "PT0S", "updated": "2018-06-04T13:02:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [], "components": [], "created": "2017-01-18T16:40:00.000000", "creator": "557058:91233464-4152-4228-81dd-172d43a52a03", 
"customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@46c3aebe"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "Epic Link", "fieldType": "com.pyxis.greenhopper.jira:gh-epic-link", "value": "POOLER-99"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzbc47:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "3.0"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_52080170222_*|*_5_*:*_1_*:*_0"}], "description": "The pooler uses inefficient vsphere APIs to manage objects, e.g. {{VirtualMachine}}, resulting in the entire object being serialized, even though we often just need to know the name, e.g. 
discover all of the VM names in the {{redhat-7-x86_64}} pool.\n\nFor example:\n\n{code:ruby}\nbase = base.childEntity.find { |f| f.name == pool }\n{code}\n\nThe {{childEntity}} call will cause vsphere to return \"full\" objects for the parent {{base}}'s children, even though we really only need to know the path to the pool whose name matches.\n\nSimilarly, we traverse objects:\n\n{code:ruby}\n      source_host = vm.summary.runtime.host\n{code}\n\nBehind the scenes, rbvmomi will load the full {{VirtualMachineSummary}}, {{VirtualMachineRuntimeInfo}}, and {{HostSystem}} objects.\n\nA better approach is to use the {{PropertyCollector}} based inventory APIs. It allows a single query to specify the traversal path and selection criteria (much like a SQL statement {{select name from table t where t.parent == 'something'}}), and only return a projection of the data. The data can then be accessed as {{vm['summary.runtime.host']}}.\n\nFor example the perfdata sample in rbvmomi can export all of the vm, cluster, resourcepool, etc information for our vsphere in just a few seconds.\n\nThe pooler does use the {{PropertyCollector}} in {{find_vm_heavy}}, but it is not thread-safe. 
I will find a separate ticket on that.", "epicLinkSummary": "All optimization and improvements tickets targeted for 2018", "estimate": "PT0S", "externalId": "10075", "fixedVersions": [], "id": "10075", "issueType": "Bug", "key": "POOLER-67", "labels": [], "originalEstimate": "PT0S", "parent": "10083", "parentSummary": "All optimization and improvements tickets targeted for 2018", "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:91233464-4152-4228-81dd-172d43a52a03", "resolution": "Won't Do", "resolutionDate": "2018-09-13T12:23:00.000000", "status": "Resolved", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Pooler should use inventory APIs to search vsphere", "timeSpent": "PT0S", "updated": "2018-09-13T12:23:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "attachments": [], "comments": [{"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "I have put up a PR that implements optional purging for vms and folders that are not configured and are in the base path.", "created": "2018-07-05T18:04:00.000000"}], "components": [], "created": "2017-01-17T16:19:00.000000", "creator": "557058:91233464-4152-4228-81dd-172d43a52a03", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@66f038c1"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": 
"com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzbadz:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "3.0"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "06/Jul/18"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_11659_*|*_10007_*:*_1_*:*_271056624_*|*_10009_*:*_1_*:*_927640578_*|*_5_*:*_1_*:*_0_*|*_10005_*:*_1_*:*_46140337643"}], "description": "If a pool is removed from vmpooler's configuration, then:\n\n1. ready VMs in the pool are never deleted\n2. 
running VMs are never deleted since we don't check them against their TTL, eg here are two VMs checked out in early 2016 that are still active (one is powered down, one is still running):\n\n{noformat}\n$ redis-cli hmget vmpooler__vm__hv1h4ma9milkcrl template lifetime checkout tag:jenkins_build_url\n1) \"fedora-20-x86_64\"\n2) \"12\"\n3) \"2016-03-24 16:35:14 -0700\"\n4) \"https://phoenix.delivery.puppetlabs.net/job/platform_strings_acceptance-master/label=beaker,platform=fedora20/83/\"\n$ redis-cli hmget vmpooler__vm__rs4uzoxohi9a2k7 template lifetime checkout tag:jenkins_build_url\n1) \"fedora-20-x86_64\"\n2) \"12\"\n3) \"2016-02-19 19:31:28 -0800\"\n4) \"https://phoenix.delivery.puppetlabs.net/job/platform_strings_acceptance-master/label=beaker,platform=fedora20/82/\"\n{noformat}\n\nAnd vmpooler thinks the vms are active (so it counts against the {{running}} count):\n\n{noformat}\n$ redis-cli hgetall vmpooler__active__fedora-20-x86_64 rs4uzoxohi9a2k7 hv1h4ma9milkcrl\n 1) \"rs4uzoxohi9a2k7\"\n 2) \"2016-02-19 19:31:28 -0800\"\n 3) \"q8qroep21rv3usm\"\n 4) \"2016-03-21 18:09:34 -0700\"\n 5) \"hv1h4ma9milkcrl\"\n 6) \"2016-03-24 16:35:14 -0700\"\n 7) \"bqvp9cfrlfyrdkg\"\n 8) \"2016-03-24 23:14:41 -0700\"\n 9) \"fl04m1bnb3d7mv9\"\n10) \"2016-03-25 09:54:43 -0700\"\n{noformat}\n\nNeither VMs are reachable via its IP, but {{hv1h4ma9milkcrl}} is still running as can be seen through the vcenter console, and {{rs4uzoxohi9a2k7}} is powered down but still present.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10132", "fixedVersions": [], "id": "10132", "issueType": "Bug", "key": "POOLER-66", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:91233464-4152-4228-81dd-172d43a52a03", 
"resolution": "Fixed", "resolutionDate": "2018-07-19T15:03:00.000000", "status": "Resolved", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "VMs are not deleted when pool is deleted", "timeSpent": "PT0S", "updated": "2018-07-19T15:03:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [{"attacher": "63d40628f6e1b543161789a7", "created": "2017-01-13T13:27:00.000000", "name": "vmpooler-cinext-lost.txt", "uri": "https://puppet.atlassian.net/rest/api/3/attachment/content/10000"}, {"attacher": "63d40628f6e1b543161789a7", "created": "2017-01-13T15:50:00.000000", "name": "vmpooler-lost-discovered.txt", "uri": "https://puppet.atlassian.net/rest/api/3/attachment/content/10001"}, {"attacher": "63d40628f6e1b543161789a7", "created": "2017-01-13T13:27:00.000000", "name": "vmpooler-lost.txt", "uri": "https://puppet.atlassian.net/rest/api/3/attachment/content/10002"}], "comments": [{"author": "557058:91233464-4152-4228-81dd-172d43a52a03", "body": "I've attached files listing the VMs that were \"lost\" by each vmpooler instance. The VMs are all running in the {{acceptance1}} cluster and are in the \"Discovered virtual machine\" folder, accounting for 596 of the total 2045 hosts powered on in that cluster.\n\n{noformat}\n$ cat vmpooler-lost.txt | wc -l\n     529\n$ cat vmpooler-cinext-lost.txt | wc -l\n      67\n{noformat}", "created": "2017-01-13T13:31:00.000000"}, {"author": "557058:91233464-4152-4228-81dd-172d43a52a03", "body": "Looks like this happened back in June too, and we've had a VM running since then:\n\n{noformat}\n/var/log/vmpooler.log.1:[2016-06-09 02:42:34] [!] 
[centos-7-x86_64] 'vojwqilmp10jdfq' not found in inventory, removed from 'completed' queue\n$ ping vojwqilmp10jdfq\nPING vojwqilmp10jdfq.delivery.puppetlabs.net (10.32.112.58): 56 data bytes\n{noformat}", "created": "2017-01-13T13:43:00.000000"}, {"author": "557058:91233464-4152-4228-81dd-172d43a52a03", "body": "This lists all of the vms that vmpooler and vmpooler-cinext lost, and that are also in the \"discovered virtual machines\" folder (628 total). These can safely be deleted.", "created": "2017-01-13T15:51:00.000000"}, {"author": "557058:91233464-4152-4228-81dd-172d43a52a03", "body": "Commit https://github.com/puppetlabs/vmpooler/blob/master/lib/vmpooler/pool_manager.rb#L584-L586 added an error message to at least record when there is an exception, but the code continues along, which can lead to the behavior described here. Also interesting is I'm seeing these messages:\n\n{noformat}\n[2016-12-03 09:32:33] [!] [centos-6-x86_64] _check_pool failed with an error while inspecting inventory: ManagedObjectNotFound: The object has already been deleted or has not been completely created\n{noformat}\n\nwhich occurs when trying to enumerate the folder's children {{base.childEntity.each}} and one of the child VMs is deleted/created. There are also connection refused messages:\n\n{noformat}\n[2016-12-01 15:27:31] [!] [oracle-6-i386] _check_pool failed with an error while inspecting inventory: Connection refused - Connection refused\n{noformat}\n\nWe have since rolled vmpooler back to a stable sha {{8286ec2854bae2c02cdab9da1e7cdb40bd821531}}, but it t least shows that exceptions do happen while collecting inventory, and we're silently ignoring them.", "created": "2017-01-19T16:25:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "This behavior has changed. 
If a exception occurs when checking inventory vmpooler will log that there was an error and then return pool_check_response, which allows the loop calling check_pool to determine whether to back off before trying again. This should prevent the discovered block that forgets about VMs from ever being reached unless inventory retrieval was successful.", "created": "2018-07-03T12:58:00.000000"}], "components": ["VM Pooler"], "created": "2017-01-12T11:35:00.000000", "creator": "557058:91233464-4152-4228-81dd-172d43a52a03", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@497c5835"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzb7c7:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "1.0"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "03/Jul/18"}, {"fieldName": "[CHART] Time in 
Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_112018_*|*_6_*:*_1_*:*_0_*|*_10005_*:*_1_*:*_46398077232"}], "description": "The pool_manager#check_pool method is called to walk each VM in vsphere for a given pool, and determine if the VM is in the {{vmpooler\\_\\_running\\_\\_\\*}}, {{vmpooler\\_\\_ready\\_\\_\\*}}, etc sets in redis. Any VM which is not in one of those sets, is considered to be unmanaged and the pooler will delete all information about that VM, effectively forgetting about it.\n\nThe problem is that if vmpooler fails to connect to vsphere, eg due to connection limits, then vmpooler rescues the exception and swallows it:\n\n{code:ruby}\n      # INVENTORY\n      inventory = {}\n      begin\n        base = $vsphere[pool['name']].find_folder(pool['folder'])\n\n        base.childEntity.each do |vm|\n          ...\n          inventory[vm['name']] = 1\n        end\n      rescue\n      end\n{code}\n\nLater in the same method, vmpooler will \"forget\" about any completed VM not in inventory:\n\n{code:ruby}\n      # COMPLETED\n      $redis.smembers('vmpooler__completed__' + pool['name']).each do |vm|\n        if inventory[vm]\n          begin\n            destroy_vm(vm, pool['name'])\n          rescue\n            $logger.log('s', \"[!] [#{pool['name']}] '#{vm}' destroy appears to have failed\")\n            $redis.srem('vmpooler__completed__' + pool['name'], vm)\n            $redis.hdel('vmpooler__active__' + pool['name'], vm)\n            $redis.del('vmpooler__vm__' + vm)\n          end\n        else\n          $logger.log('s', \"[!] 
[#{pool['name']}] '#{vm}' not found in inventory, removed from 'completed' queue\")\n          $redis.srem('vmpooler__completed__' + pool['name'], vm)\n          $redis.hdel('vmpooler__active__' + pool['name'], vm)\n          $redis.del('vmpooler__vm__' + vm)\n        end\n      end\n{code}\n\nThis results in the VM continuing to run in vsphere, but vmpooler doesn't know anything about it:\n\n{noformat}\n$ grep et21x83oz2c3ubl vmpooler.log\n[2016-12-03 02:28:25] [ ] [redhat-7-x86_64] 'et21x83oz2c3ubl' is being cloned from 'redhat-7-x86_64'\n[2016-12-03 02:29:17] [+] [redhat-7-x86_64] 'et21x83oz2c3ubl' cloned from 'redhat-7-x86_64' in 51.61 seconds\n[2016-12-03 02:32:44] [>] [redhat-7-x86_64] 'et21x83oz2c3ubl' moved to 'ready' queue\n[2016-12-03 02:42:09] [ ] [redhat-7-x86_64] 'et21x83oz2c3ubl' is running on ...\n[2016-12-03 02:53:18] [!] [redhat-7-x86_64] 'et21x83oz2c3ubl' not found in inventory, removed from 'completed' queue\n$ ping et21x83oz2c3ubl\nPING et21x83oz2c3ubl.delivery.puppetlabs.net (10.32.118.34) 56(84) bytes of data.\n64 bytes from et21x83oz2c3ubl.delivery.puppetlabs.net (10.32.118.34): icmp_req=1 ttl=63 time=0.319 ms\n{noformat}\n\nThere are 552 VMs running in this state, all seem related to the Dec outages.\n\n/cc [~accountid:557058:7306c811-399c-4964-b8cc-74e0ece239a1], [~accountid:557058:42ee807e-7bdc-4d08-8c59-b269f42cee43], [~accountid:63d40635a05386069cdb69d6], [~accountid:557058:3a3de6c6-95b1-47b3-a52a-6a6865ff9241], [~accountid:557058:0f255f44-5dce-4849-80f5-c0db60d9b049], [~accountid:557058:9479147e-472e-492f-9d5f-2788ee2dd8d0]\n", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10007", "fixedVersions": [], "id": "10007", "issueType": "Bug", "key": "POOLER-65", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": 
"software", "projectUrl": null, "reporter": "557058:91233464-4152-4228-81dd-172d43a52a03", "resolution": "Fixed", "resolutionDate": "2018-07-03T12:58:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Pooler leaks completed VMs if there is an exception when gathering inventory", "timeSpent": "PT0S", "updated": "2018-07-03T12:58:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "attachments": [], "comments": [{"author": "557058:91233464-4152-4228-81dd-172d43a52a03", "body": "The following hosts are unreachable:\n\n{noformat}\n$ echo \"avp38wagql691vu\nfgf4cccgwvt0ntl\njxldchet8fx49nw\nl85z96lviz3gg8r\npwiyt19rh9w4lrs\nwvh0ysr4ltxsd46\" | while read line", "created": "2017-01-17T16:00:00.000000"}, {"author": "557058:91233464-4152-4228-81dd-172d43a52a03", "body": "Also [~accountid:557058:79f2fdd6-baee-43c4-91af-c7e647173c95] mentioned he was seeing this with some PE integration pipelines:\n\nhttps://jenkins-master-prod-1.delivery.puppetlabs.net/view/pe-integration/view/pe-2016.4.x-weekend/job/enterprise_pe-acceptance-tests_integration-system_pe_full-agent-upgrade-secondary-frictionless_weekend_2016.4.x/1/LAYOUT=centos6-64mcd-solaris10-64f,LEGACY_AGENT_VERSION=NONE,PLATFORM=NOTUSED,SCM_BRANCH=2016.4.x,UPGRADE_FROM=2015.2.3,UPGRADE_TO_VERSION=NONE,label=beaker-bigjob/console\n\n{noformat}\n$ sudo grep fit1y68xorm9z5e /var/log/vmpooler.log\n[2016-10-28 12:26:56] [ ] [solaris-10-x86_64] 'fit1y68xorm9z5e' is being cloned from 'solaris-10-x86_64'\n[2016-10-28 12:27:54] [+] [solaris-10-x86_64] 'fit1y68xorm9z5e' cloned from 'solaris-10-x86_64' in 57.28 seconds\n[2016-10-28 12:29:17] [>] [solaris-10-x86_64] 'fit1y68xorm9z5e' moved to 'ready' queue\n[2017-01-16 11:39:15] [!] 
[solaris-10-x86_64] 'fit1y68xorm9z5e' not found in inventory, removed from 'completed' queue\n{noformat}\n\nhttps://jenkins-master-prod-1.delivery.puppetlabs.net/view/pe-integration/view/pe-2016.4.x/job/enterprise_pe-acceptance-tests_integration-system_pe_full-agent-upgrade-secondary_nightly_2016.4.x/LAYOUT=centos6-64mcd-fedora24-32f-64f,LEGACY_AGENT_VERSION=NONE,PLATFORM=NOTUSED,SCM_BRANCH=2016.4.x,UPGRADE_FROM=2016.4.2,UPGRADE_TO_VERSION=NONE,label=beaker-bigjob/1/console\n\n{noformat}\n$ sudo grep r35vo85n7nuv37r /var/log/vmpooler.log\n[2016-11-09 22:33:47] [ ] [fedora-24-x86_64] 'r35vo85n7nuv37r' is being cloned from 'fedora-24-x86_64'\n[2016-11-09 22:33:53] [+] [fedora-24-x86_64] 'r35vo85n7nuv37r' cloned from 'fedora-24-x86_64' in 6.55 seconds\n[2016-11-09 22:34:38] [>] [fedora-24-x86_64] 'r35vo85n7nuv37r' moved to 'ready' queue\n[2017-01-17 07:58:03] [!] [fedora-24-x86_64] 'r35vo85n7nuv37r' not found in inventory, removed from 'completed' queue\n{noformat}", "created": "2017-01-17T16:02:00.000000"}, {"author": "557058:91233464-4152-4228-81dd-172d43a52a03", "body": "Also [~accountid:557058:6713b848-54a4-4f2e-9636-a24860d9c3f2] saw this in puppet-agent. 
Note {{wvh0ysr4ltxsd46}} is one of the windows hosts I mentioned in an earlier comment.\n\nhttps://jenkins.puppetlabs.com/job/platform_puppet-agent_intn-van-sys_suite-daily-hiera-stable/SLAVE_LABEL=beaker,TEST_TARGET=windows10ent-32a/224/console\n\n{noformat}\n01:30:23 wvh0ysr4ltxsd46.delivery.puppetlabs.net (windows10ent-32-1) 01:30:03$ cygcheck curl\n01:30:23   Attempting ssh connection to wvh0ysr4ltxsd46.delivery.puppetlabs.net, user: Administrator, opts: {:config=>false, :paranoid=>false, :auth_methods=>[\"publickey\"], :port=>22, :forward_agent=>true, :keys=>[\"~/.ssh/id_rsa-acceptance\"], :user_known_hosts_file=>\"/var/lib/jenkins/.ssh/known_hosts\", :keepalive=>true}\n01:30:23   Warning: Try 1 -- Host wvh0ysr4ltxsd46.delivery.puppetlabs.net unreachable: SocketError - getaddrinfo: Name or service not known\n{noformat}", "created": "2017-01-19T10:54:00.000000"}, {"author": "557058:91233464-4152-4228-81dd-172d43a52a03", "body": "All of the VMs I mentioned earlier are missing the {{check}} property in redis:\n\n{noformat}\n$ echo \"avp38wagql691vu\nfgf4cccgwvt0ntl\njxldchet8fx49nw\nl85z96lviz3gg8r\npwiyt19rh9w4lrs\nwvh0ysr4ltxsd46\" | while read line", "created": "2017-01-19T12:12:00.000000"}, {"author": "557058:91233464-4152-4228-81dd-172d43a52a03", "body": "All of the following pools are not being checked or specific vms haven't been checked recently (within the last 15 minutes):\n\n{noformat}\n$ bundle exec ruby hung.rb vmpooler\nnever checked: win-10-ent-i386 (5 VMs)\nnever checked: win-10-ent-x86_64 (5 VMs)\nnever checked: win-10-pro-x86_64 (5 VMs)\nnever checked: win-2003-i386 (2 VMs)\nnever checked: win-2003-x86_64 (2 VMs)\nnever checked: win-2003r2-i386 (5 VMs)\nnever checked: win-2003r2-x86_64 (5 VMs)\nnever checked: win-2008-x86_64 (5 VMs)\nnever checked: win-2008r2-ch-x86_64 (5 VMs)\nnever checked: win-2008r2-wmf5-bv-x86_64 (5 VMs)\nnever checked: win-2008r2-wmf5-gb-x86_64 (2 VMs)\nnever checked: win-2008r2-wmf5-x86_64 (5 VMs)\nnever checked: 
win-2008r2-x86_64 (8 VMs)\nnever checked: win-2012-x86_64 (8 VMs)\nnever checked: win-2012r2-core-x86_64 (5 VMs)\nnever checked: win-2012r2-ja-x86_64 (5 VMs)\nnever checked: win-2012r2-wmf5-bv-x86_64 (5 VMs)\nnever checked: win-2012r2-wmf5-x86_64 (5 VMs)\nnever checked: win-2012r2-x86_64 (9 VMs)\nnever checked: win-2016-x86_64 (5 VMs)\nnever checked: win-7-fr-x86_64 (5 VMs)\nnever checked: win-7-x86_64 (5 VMs)\nnever checked: win-8-x86_64 (5 VMs)\nnever checked: win-81-x86_64 (5 VMs)\nnever checked: win-vista-x86_64 (5 VMs)\nnot checked recently: fedora-20-i386 (3 VMs)\n{noformat}\n\n{noformat}\n$ bundle exec ruby hung.rb vmpooler-cinext\nnever checked: win-10-ent-i386 (3 VMs)\nnever checked: win-10-ent-x86_64 (3 VMs)\nnever checked: win-10-pro-x86_64 (3 VMs)\nnever checked: win-2003-i386 (3 VMs)\nnever checked: win-2003-x86_64 (3 VMs)\nnever checked: win-2003r2-i386 (3 VMs)\nnever checked: win-2003r2-x86_64 (3 VMs)\nnever checked: win-2008-x86_64 (3 VMs)\nnever checked: win-2008r2-ch-x86_64 (3 VMs)\nnever checked: win-2008r2-wmf5-gb-x86_64 (3 VMs)\nnever checked: win-2008r2-wmf5-x86_64 (3 VMs)\nnever checked: win-2008r2-x86_64 (3 VMs)\nnever checked: win-2012-x86_64 (3 VMs)\nnever checked: win-2012r2-core-x86_64 (3 VMs)\nnever checked: win-2012r2-wmf5-x86_64 (3 VMs)\nnever checked: win-2012r2-x86_64 (3 VMs)\nnever checked: win-2016-x86_64 (3 VMs)\nnever checked: win-7-fr-x86_64 (3 VMs)\nnever checked: win-7-x86_64 (3 VMs)\nnever checked: win-81-x86_64 (3 VMs)\nnever checked: win-vista-x86_64 (3 VMs)\nnot checked recently: amazon-201403-x86_64 (3 VMs)\nnot checked recently: arista-4-i386 (3 VMs)\nnot checked recently: centos-4-i386 (2 VMs)\nnot checked recently: cisco-exr-9k-x86_64 (3 VMs)\nnot checked recently: cisco-nxos-9k-x86_64 (3 VMs)\nnot checked recently: cisco-wrlinux-5-x86_64 (2 VMs)\nnot checked recently: cisco-wrlinux-7-x86_64 (2 VMs)\nnot checked recently: cumulus-vx-25-x86_64 (1 VMs)\nnot checked recently: debian-6-x86_64 (3 VMs)\nnot checked 
recently: fedora-14-i386 (2 VMs)\nnot checked recently: fedora-21-i386 (3 VMs)\nnot checked recently: fedora-21-x86_64 (3 VMs)\nnot checked recently: opensuse-11-i386 (3 VMs)\nnot checked recently: opensuse-11-x86_64 (3 VMs)\nnot checked recently: osx-1011-x86_64 (1 VMs)\nnot checked recently: scientific-5-i386 (1 VMs)\nnot checked recently: solaris-112-x86_64 (3 VMs)\nnot checked recently: ubuntu-1504-i386 (3 VMs)\nnot checked recently: ubuntu-1504-x86_64 (3 VMs)\nnot checked recently: vro-6-x86_64 (3 VMs)\nnot checked recently: vro-7-x86_64 (3 VMs)\n{noformat}\n\nMy theory is that we are exceeding the number of simultaneous connects as a result of multiple vmpoolers connecting to vcenter. From https://www.vmware.com/pdf/vsphere5/r55/vsphere-55-configuration-maximums.pdf\n\n\"Concurrent vSphere Web Clients connections to vCenter Server 180\"\n\nProduction vmpooler has 104 pools and vmpooler-cinext has 97 pools, plus there's vmpooler-dev, etc, and each pool makes one connection to vcenter.\n\nIt doesn't appear that we have increased the web connection limit in vcenter (not to be confused with the max db connection limit of 50). So I think we are guaranteed to have some pool threads die given the current configuration.", "created": "2017-01-19T13:05:00.000000"}, {"author": "557058:91233464-4152-4228-81dd-172d43a52a03", "body": "I'm pretty sure the vmpooler-cinext supervisor thread died. 
It started on\n\n{noformat}\n[2017-01-13 09:50:18] starting vmpooler\n{noformat}\n\nBut there are no {{thread died}} messages:\n\n{noformat}\n$ sudo grep 'thread died' /var/log/vmpooler.log\n$\n{noformat}\n\nBut many pools are not being checked, which means the per-pools threads have also died, and since the supervisor thread died, we never notice/restart the per-pool threads.", "created": "2017-01-19T13:14:00.000000"}, {"author": "557058:91233464-4152-4228-81dd-172d43a52a03", "body": "Found [22 bad VMs in vmpooler|https://gist.github.com/joshcooper/68303cbfbc1b4a9a6ef4b9bde24636b4] and [74 VMs in vmpooler-cinext|https://gist.github.com/joshcooper/805ad5d209696697076aa0fe7fc83e8b]. They've been purged.", "created": "2017-01-24T14:16:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "A few changes have been made to vmpooler since the last update was added here. Vmpooler throttles and shares connections in a connection pool so we should be able to have some confidence that the number of connections in use are limited. Some bug fixes have been made to the code that checks ready VMs and I see ready VMs with issues periodically removed from the ready queue during the course of vmpooler operations. Have you continued to see issues with this?", "created": "2018-04-18T16:39:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "VMs in the ready queue are now verified as running and the pending check is re-performed along with this. 
This should now adequately detect when a VM has disappeared while in the ready state.", "created": "2018-06-13T17:16:00.000000"}], "components": ["VM Pooler"], "created": "2017-01-09T15:14:00.000000", "creator": "557058:5d2971d3-3eb2-4f05-ab63-8a307eabd9c4", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@1cb4388e"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "Epic Link", "fieldType": "com.pyxis.greenhopper.jira:gh-epic-link", "value": "POOLER-99"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzb4dr:"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "17/Jan/17"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_246236588_*|*_6_*:*_1_*:*_0_*|*_10005_*:*_1_*:*_44685532956"}], "description": "Old/non-existent VMs are still used to fulfill requests. 
VMPooler should check to see if a vm in the ready queue is still active, but for some reason that is not happening. Possibly due to max connectetions to vsphere.\n\nExample of Old VM in ready queue attempting to be used:\n{code}[2016-11-19 05:32:48] [ ] [scientific-7-x86_64] 'f8wrefotlnnphe8' is being cloned from 'scientific-7-x86_64'\n[2016-11-19 05:33:01] [+] [scientific-7-x86_64] 'f8wrefotlnnphe8' cloned from 'scientific-7-x86_64' in 13.93 seconds\n[2016-11-19 05:34:02] [>] [scientific-7-x86_64] 'f8wrefotlnnphe8' moved to 'ready' queue\n[2017-01-09 11:41:14] [!] [scientific-7-x86_64] 'f8wrefotlnnphe8' not found in inventory, removed from 'completed' queue{code}\n\n", "epicLinkSummary": "All optimization and improvements tickets targeted for 2018", "estimate": "PT0S", "externalId": "10161", "fixedVersions": [], "id": "10161", "issueType": "Bug", "key": "POOLER-64", "labels": [], "originalEstimate": "PT0S", "parent": "10083", "parentSummary": "All optimization and improvements tickets targeted for 2018", "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:5d2971d3-3eb2-4f05-ab63-8a307eabd9c4", "resolution": "Fixed", "resolutionDate": "2018-06-13T17:17:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Non-existent VMs in the ready queue are used to fulfill requests", "timeSpent": "PT0S", "updated": "2018-06-13T17:17:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [], "components": ["VM Pooler"], "created": "2017-01-05T16:44:00.000000", "creator": "557058:7306c811-399c-4964-b8cc-74e0ece239a1", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": 
"Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@488d9cb0"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "Epic Link", "fieldType": "com.pyxis.greenhopper.jira:gh-epic-link", "value": "POOLER-51"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzb2mn:"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_12321_*|*_5_*:*_1_*:*_0_*|*_10005_*:*_1_*:*_16836633641"}], "description": "Once always-be-scheduling is completely delivered (all jobs have been migrated), we need to reduce the pool sizes to that of the largest job running in CI. This is expected to significantly reduce load on the pooler. 
", "epicLinkSummary": "All optimization and improvements tickets targeted for 2017", "estimate": "PT0S", "externalId": "10060", "fixedVersions": [], "id": "10060", "issueType": "Improvement", "key": "POOLER-63", "labels": [], "originalEstimate": "PT0S", "parent": "10119", "parentSummary": "All optimization and improvements tickets targeted for 2017", "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:7306c811-399c-4964-b8cc-74e0ece239a1", "resolution": "Fixed", "resolutionDate": "2017-07-19T14:35:00.000000", "status": "Resolved", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "On delivery of ABS, reduce pool sizes to that of the largest job", "timeSpent": "PT0S", "updated": "2017-07-19T14:35:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [{"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "Is this still needed?", "created": "2018-07-16T18:22:00.000000"}, {"author": "557058:7306c811-399c-4964-b8cc-74e0ece239a1", "body": "Yes. In my opinion, this would provide some value, although it's not a high priority.\n\nWe should talk about this some before digging in though. 
The INFC team is looking to do something similar for SLICE.", "created": "2018-07-17T10:10:00.000000"}], "components": ["VM Pooler"], "created": "2017-01-03T11:26:00.000000", "creator": "557058:7306c811-399c-4964-b8cc-74e0ece239a1", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@75c0bace"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "Epic Link", "fieldType": "com.pyxis.greenhopper.jira:gh-epic-link", "value": "POOLER-58"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzazzr:"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "17/Jul/18"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_778343473_*|*_5_*:*_1_*:*_0_*|*_10005_*:*_1_*:*_84500262929"}], "description": "In this dashboard, http://grafana.ops.puppetlabs.net/dashboard/db/vm-status, the unknown VMs in the VM Running Count by Status section 
are likely from Devs doing ad hoc work. We need to get better granularity and determine which Devs are setting excessively long TTLs, then follow up with them to make sure this is the best use of resources. Depending on what we find, we may want to set some policy to control this in the future. ", "epicLinkSummary": "All tickets related to updating our preserve on fail policy", "estimate": "PT0S", "externalId": "10057", "fixedVersions": [], "id": "10057", "issueType": "Improvement", "key": "POOLER-62", "labels": [], "originalEstimate": "PT0S", "parent": "10027", "parentSummary": "All tickets related to updating our preserve on fail policy", "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:7306c811-399c-4964-b8cc-74e0ece239a1", "resolution": "Won't Do", "resolutionDate": "2019-09-17T12:56:00.000000", "status": "Resolved", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Better identify VMs in the unknown state, on the VM Status dashboard", "timeSpent": "PT0S", "updated": "2019-09-17T12:56:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [], "components": ["VM Pooler"], "created": "2017-01-03T11:17:00.000000", "creator": "557058:7306c811-399c-4964-b8cc-74e0ece239a1", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@213d6e05"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": 
"com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "Epic Link", "fieldType": "com.pyxis.greenhopper.jira:gh-epic-link", "value": "POOLER-58"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzazz3:"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_778840111_*|*_5_*:*_1_*:*_0_*|*_10005_*:*_1_*:*_84500329181"}], "description": "Other tickets in this epic track work required to determine the impact of this, both to process and also to load on the pooler. This ticket is to drive to a conclusion.\n\nIn Pit Crew, we discussed the possibility of disabling preserve on fail by default, and automatically enabling it for matrix reloads. 
(and also automatically enabling matrix reloads on failures).\n\nAnother option was to disable it by default, and see who complains.", "epicLinkSummary": "All tickets related to updating our preserve on fail policy", "estimate": "PT0S", "externalId": "10131", "fixedVersions": [], "id": "10131", "issueType": "Improvement", "key": "POOLER-61", "labels": [], "originalEstimate": "PT0S", "parent": "10027", "parentSummary": "All tickets related to updating our preserve on fail policy", "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:7306c811-399c-4964-b8cc-74e0ece239a1", "resolution": "Won't Do", "resolutionDate": "2019-09-17T12:57:00.000000", "status": "Resolved", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Should preserve on fail be set to default?", "timeSpent": "PT0S", "updated": "2019-09-17T12:57:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [], "components": ["VM Pooler"], "created": "2017-01-03T11:11:00.000000", "creator": "557058:7306c811-399c-4964-b8cc-74e0ece239a1", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@66cd940b"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "Epic Link", "fieldType": "com.pyxis.greenhopper.jira:gh-epic-link", "value": "POOLER-58"}, {"fieldName": "People 
Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzazxr:"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_779249958_*|*_5_*:*_1_*:*_0_*|*_10005_*:*_1_*:*_84500342028"}], "description": "As an alternative to preserve on fail, we should weigh the pro's and cons to either snapshotting on fail, or suspending. We would need to measure the amount of load this may introduce, and determine the expected workflow to retrieve/resume these vm's to see if this is viable. ", "epicLinkSummary": "All tickets related to updating our preserve on fail policy", "estimate": "PT0S", "externalId": "10130", "fixedVersions": [], "id": "10130", "issueType": "Task", "key": "POOLER-60", "labels": [], "originalEstimate": "PT0S", "parent": "10027", "parentSummary": "All tickets related to updating our preserve on fail policy", "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:7306c811-399c-4964-b8cc-74e0ece239a1", "resolution": "Won't Do", "resolutionDate": "2019-09-17T12:57:00.000000", "status": "Resolved", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Spike on snapshotting failed VMs vs. 
suspending", "timeSpent": "PT0S", "updated": "2019-09-17T12:57:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [{"author": "557058:5aed1ed3-f3b3-4dd3-bebb-5b3bf9907bc5", "body": "I'll wait to get tapped in to this ticket before doing anything else", "created": "2017-01-03T18:16:00.000000"}], "components": ["VM Pooler"], "created": "2017-01-03T09:42:00.000000", "creator": "557058:7306c811-399c-4964-b8cc-74e0ece239a1", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@55938e53"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "Epic Link", "fieldType": "com.pyxis.greenhopper.jira:gh-epic-link", "value": "POOLER-58"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzazkn:"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "04/Jan/17"}, {"fieldName": "[CHART] Time in Status", "fieldType": 
"com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_784599803_*|*_5_*:*_1_*:*_0_*|*_10005_*:*_1_*:*_84500353685"}], "description": "In Pit Crew, two ideas were proposed to determine how often preserve on fail is used (that is, how often people actually log in to the failed SUT), in order to measure the impact of disabling it by default.\n\n1. Send a survey to dev. [~accountid:557058:5aed1ed3-f3b3-4dd3-bebb-5b3bf9907bc5] has provided this first draft: https://docs.google.com/a/puppet.com/forms/d/e/1FAIpQLSfeTDhM_JYv7Zr02bnvAYxeCslX9QmQtOwOALQksSc17KDj5A/viewform?c=0&w=1\n\n2. Audit SSH logs of VMs in failed state. This is likely too much effort.", "epicLinkSummary": "All tickets related to updating our preserve on fail policy", "estimate": "PT0S", "externalId": "10081", "fixedVersions": [], "id": "10081", "issueType": "Task", "key": "POOLER-59", "labels": [], "originalEstimate": "PT0S", "parent": "10027", "parentSummary": "All tickets related to updating our preserve on fail policy", "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:7306c811-399c-4964-b8cc-74e0ece239a1", "resolution": "Won't Do", "resolutionDate": "2019-09-17T12:57:00.000000", "status": "Resolved", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Determine how often preserve on fail is used", "timeSpent": "PT0S", "updated": "2019-09-17T12:57:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [], "components": ["VM Pooler"], "created": "2017-01-03T09:36:00.000000", "creator": "557058:7306c811-399c-4964-b8cc-74e0ece239a1", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Team/s", "fieldType": 
"com.atlassian.jira.plugin.system.customfieldtypes:multiselect", "value": "Quality Engineering"}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@e62f234"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "Epic Color", "fieldType": "com.pyxis.greenhopper.jira:gh-epic-color", "value": "ghx-label-8"}, {"fieldName": "Epic Name", "fieldType": "com.pyxis.greenhopper.jira:gh-epic-label", "value": "Preserve on Fail policy spike"}, {"fieldName": "Epic Status", "fieldType": "com.pyxis.greenhopper.jira:gh-epic-status", "value": "Done"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Issue color", "fieldType": "com.pyxis.greenhopper.jira:jsw-issue-color", "value": "dark_purple"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzajnj:"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_14102_*|*_6_*:*_1_*:*_0_*|*_10010_*:*_1_*:*_85285239157"}], "description": "Preserve on Fail is believed to be causing unnecessary load on the pooler. 
We need to reconsider how and when this functionality is used, and determine what can be done to have it enabled only when it's helpful for triage.\n\nhttp://grafana.ops.puppetlabs.net/dashboard/db/vm-status\n\nCollecting work for this epic in the POOLER project for lack of a better home. This will likely involve CI and Beaker work once decisions have been made.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10027", "fixedVersions": [], "id": "10027", "issueType": "Epic", "key": "POOLER-58", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:7306c811-399c-4964-b8cc-74e0ece239a1", "resolution": "Fixed", "resolutionDate": "2019-09-17T12:57:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "All tickets related to updating our preserve on fail policy", "timeSpent": "PT0S", "updated": "2019-09-17T12:57:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [{"attacher": "557058:9479147e-472e-492f-9d5f-2788ee2dd8d0", "created": "2016-12-20T04:50:00.000000", "name": "Screen Shot 2016-12-19 at 18.18.58.png", "uri": "https://puppet.atlassian.net/rest/api/3/attachment/content/10008"}, {"attacher": "557058:9479147e-472e-492f-9d5f-2788ee2dd8d0", "created": "2016-12-20T04:50:00.000000", "name": "Screen Shot 2016-12-20 at 11.49.15.png", "uri": "https://puppet.atlassian.net/rest/api/3/attachment/content/10006"}], "comments": [{"author": "557058:750ff3bd-7564-4d8a-b480-b500b85be583", "body": "Sorry we didn't see this earlier", "created": "2017-01-09T14:52:00.000000"}, {"author": "557058:750ff3bd-7564-4d8a-b480-b500b85be583", "body": "Oh, actually he probably won't be around for a little 
longer, since paternity leave was extended.", "created": "2017-01-09T14:54:00.000000"}, {"author": "557058:9479147e-472e-492f-9d5f-2788ee2dd8d0", "body": "currently tracked in QENG-5305", "created": "2017-10-04T10:16:00.000000"}], "components": ["VM Pooler"], "created": "2016-12-20T04:49:00.000000", "creator": "557058:9479147e-472e-492f-9d5f-2788ee2dd8d0", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@e67535c"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "Epic Link", "fieldType": "com.pyxis.greenhopper.jira:gh-epic-link", "value": "POOLER-51"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hymfzb:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "1.0"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "09/Jan/17"}, {"fieldName": "[CHART] Time in Status", "fieldType": 
"com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_2011823538_*|*_6_*:*_1_*:*_0_*|*_10005_*:*_1_*:*_22887444068"}], "description": "I'm seeing a couple high CPU ready metrics for two blades, see attached grafana \n\nChecked graphite the raw data is the same, can we compare that with the EXi performance graphs somewhere?\n\nthanks\n\nSam", "epicLinkSummary": "All optimization and improvements tickets targeted for 2017", "estimate": "PT0S", "externalId": "10023", "fixedVersions": [], "id": "10023", "issueType": "Bug", "key": "POOLER-57", "labels": [], "originalEstimate": "PT0S", "parent": "10119", "parentSummary": "All optimization and improvements tickets targeted for 2017", "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:9479147e-472e-492f-9d5f-2788ee2dd8d0", "resolution": "Duplicate", "resolutionDate": "2017-10-04T10:16:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "CPU ready in acceptance1 cluster", "timeSpent": "PT0S", "updated": "2017-10-04T10:16:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [{"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "I think this is resolved here [https://github.com/puppetlabs/vmpooler/commits/a6c8c76d310435403ce5862223f5bab699f9c451]\u00a0. We have been running this for a while. Your suggestion appears to have some improvements over what is in place now. 
Let me know if you think this should be re-opened and those improvements made.", "created": "2018-04-18T16:35:00.000000"}], "components": ["VM Pooler"], "created": "2016-12-16T11:39:00.000000", "creator": "557058:91233464-4152-4228-81dd-172d43a52a03", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@8c8a298"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "Epic Link", "fieldType": "com.pyxis.greenhopper.jira:gh-epic-link", "value": "POOLER-99"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzasvz:"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "18/Apr/18"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_2332829951_*|*_6_*:*_1_*:*_0_*|*_10005_*:*_1_*:*_39844501354"}], "description": "VMpooler tests whether a VM is pending or ready by calling code like the following:\n\n{code:ruby}\n        begin\n    
      Timeout.timeout(5) do\n            TCPSocket.new vm, 22\n          end\n          move_pending_vm_to_ready(vm, pool, host)\n        rescue\n          fail_pending_vm(vm, pool, timeout)\n        end\n{code}\n\nNote {{TCPSocket.new}} makes a blocking connect call, but doesn't explicitly close the socket, so there are large numbers of connections open:\n\n{noformat}\n$ sudo lsof -p 16335 -f | grep :ssh\njava    16335 root   53r  IPv6         2850116387      0t0        TCP hornet.delivery.puppetlabs.net:47112->tulsxjet0e7wmrs.delivery.puppetlabs.net:ssh (ESTABLISHED)\njava    16335 root  138w  IPv6         2850114020      0t0        TCP hornet.delivery.puppetlabs.net:39363->j12v96nk09yr7gy.delivery.puppetlabs.net:ssh (ESTABLISHED)\njava    16335 root  169u  IPv6         2850149094      0t0        TCP hornet.delivery.puppetlabs.net:34085->nl2ldvtfdgqd451.delivery.puppetlabs.net:ssh (ESTABLISHED)\njava    16335 root  221r  IPv6         2850132507      0t0        TCP hornet.delivery.puppetlabs.net:60589->cf8frtg068hf9b8.delivery.puppetlabs.net:ssh (ESTABLISHED)\njava    16335 root  225r  IPv6         2850128297      0t0        TCP hornet.delivery.puppetlabs.net:59494->a9mge4r8lduxaxi.delivery.puppetlabs.net:ssh (ESTABLISHED)\njava    16335 root  226w  IPv6         2850089709      0t0        TCP hornet.delivery.puppetlabs.net:55507->aqxidssr5nmuzwf.delivery.puppetlabs.net:ssh (ESTABLISHED)\njava    16335 root  230u  IPv6         2850133365      0t0        TCP hornet.delivery.puppetlabs.net:53352->m8a7syafs0vepsf.delivery.puppetlabs.net:ssh (ESTABLISHED)\njava    16335 root  244r  IPv6         2850157106      0t0        TCP hornet.delivery.puppetlabs.net:44831->t83ma4jpxdleouf.delivery.puppetlabs.net:ssh (ESTABLISHED)\njava    16335 root  247u  IPv6         2850157200      0t0        TCP hornet.delivery.puppetlabs.net:58383->qs9rz88eu9qnlni.delivery.puppetlabs.net:ssh (ESTABLISHED)\njava    16335 root  255w  IPv6         2850093958      0t0        TCP 
hornet.delivery.puppetlabs.net:35685->bsa0kdstgd6ocfg.delivery.puppetlabs.net:ssh (ESTABLISHED)\njava    16335 root  260r  IPv6         2850131656      0t0        TCP hornet.delivery.puppetlabs.net:37636->vn8gk6xskzjabgh.delivery.puppetlabs.net:ssh (ESTABLISHED)\njava    16335 root  262u  IPv6         2850186584      0t0        TCP hornet.delivery.puppetlabs.net:52647->vdhnkr1cm2h0m9z.delivery.puppetlabs.net:ssh (ESTABLISHED)\njava    16335 root  264r  IPv6         2850130368      0t0        TCP hornet.delivery.puppetlabs.net:54088->lwzupvrfccrsfh4.delivery.puppetlabs.net:ssh (ESTABLISHED)\njava    16335 root  271w  IPv6         2850167121      0t0        TCP hornet.delivery.puppetlabs.net:37171->k2uyj1jejc3h85q.delivery.puppetlabs.net:ssh (ESTABLISHED)\njava    16335 root  276u  IPv6         2850106069      0t0        TCP hornet.delivery.puppetlabs.net:44826->pxagtyb1tcixdlu.delivery.puppetlabs.net:ssh (ESTABLISHED)\njava    16335 root  277u  IPv6         2850111553      0t0        TCP hornet.delivery.puppetlabs.net:38267->m9tjj341syqsjre.delivery.puppetlabs.net:ssh (ESTABLISHED)\njava    16335 root  281u  IPv6         2850107739      0t0        TCP hornet.delivery.puppetlabs.net:42700->m9fo0jdurbicflh.delivery.puppetlabs.net:ssh (ESTABLISHED)\njava    16335 root  284r  IPv6         2850141843      0t0        TCP hornet.delivery.puppetlabs.net:33964->lb4je9y7pa7zv1i.delivery.puppetlabs.net:ssh (ESTABLISHED)\njava    16335 root  289u  IPv6         2850126635      0t0        TCP hornet.delivery.puppetlabs.net:55425->c22b3miyof4sldd.delivery.puppetlabs.net:ssh (ESTABLISHED)\njava    16335 root  290r  IPv6         2850169762      0t0        TCP hornet.delivery.puppetlabs.net:49440->r701f85v1qt1e4j.delivery.puppetlabs.net:ssh (ESTABLISHED)\njava    16335 root  298r  IPv6         2850165156      0t0        TCP hornet.delivery.puppetlabs.net:59587->jk040bi6755pyu7.delivery.puppetlabs.net:ssh (ESTABLISHED)\njava    16335 root  304u  IPv6         2850130687      0t0        
TCP hornet.delivery.puppetlabs.net:38957->v4p29rz76qhinn9.delivery.puppetlabs.net:ssh (ESTABLISHED)\njava    16335 root  332u  IPv6         2850106412      0t0        TCP hornet.delivery.puppetlabs.net:52018->t1ay6fkwl9e6xmj.delivery.puppetlabs.net:ssh (ESTABLISHED)\njava    16335 root  333u  IPv6         2850161059      0t0        TCP hornet.delivery.puppetlabs.net:35628->k2bhuy8ln0kdqt7.delivery.puppetlabs.net:ssh (ESTABLISHED)\njava    16335 root  361w  IPv6         2850151447      0t0        TCP hornet.delivery.puppetlabs.net:49317->c8t2te7lzjpzaen.delivery.puppetlabs.net:ssh (ESTABLISHED)\njava    16335 root  363r  IPv6         2850130841      0t0        TCP hornet.delivery.puppetlabs.net:57684->ia08kujq4ri34wc.delivery.puppetlabs.net:ssh (ESTABLISHED)\njava    16335 root  364w  IPv6         2850115223      0t0        TCP hornet.delivery.puppetlabs.net:43525->o7st2b70q7rfleu.delivery.puppetlabs.net:ssh (ESTABLISHED)\njava    16335 root  368r  IPv6         2850181414      0t0        TCP hornet.delivery.puppetlabs.net:46126->k10xkf65v2qp8ie.delivery.puppetlabs.net:ssh (ESTABLISHED)\njava    16335 root  376u  IPv6         2850151725      0t0        TCP hornet.delivery.puppetlabs.net:54470->j7hssfr0d5ho2lw.delivery.puppetlabs.net:ssh (ESTABLISHED)\njava    16335 root  378w  IPv6         2850167272      0t0        TCP hornet.delivery.puppetlabs.net:35752->h4km5gjwxh3acqt.delivery.puppetlabs.net:ssh (ESTABLISHED)\njava    16335 root  381r  IPv6         2850115402      0t0        TCP hornet.delivery.puppetlabs.net:52501->g4uwioq1ulrv09r.delivery.puppetlabs.net:ssh (ESTABLISHED)\njava    16335 root  382r  IPv6         2850137454      0t0        TCP hornet.delivery.puppetlabs.net:58808->iqzfo2hovnuehmk.delivery.puppetlabs.net:ssh (ESTABLISHED)\njava    16335 root  388r  IPv6         2850133613      0t0        TCP hornet.delivery.puppetlabs.net:49825->ny54nu1u1tzvght.delivery.puppetlabs.net:ssh (ESTABLISHED)\njava    16335 root  390w  IPv6         2850184430      0t0    
    TCP hornet.delivery.puppetlabs.net:47099->fyyv09yo10e7t76.delivery.puppetlabs.net:ssh (ESTABLISHED)\njava    16335 root  392w  IPv6         2850185451      0t0        TCP hornet.delivery.puppetlabs.net:49948->xn4qxjgz0er2w0p.delivery.puppetlabs.net:ssh (ESTABLISHED)\njava    16335 root  394r  IPv6         2850165287      0t0        TCP hornet.delivery.puppetlabs.net:38786->kjpskyczlq46inv.delivery.puppetlabs.net:ssh (ESTABLISHED)\njava    16335 root  395w  IPv6         2850122225      0t0        TCP hornet.delivery.puppetlabs.net:51431->pd6nnv69ak39mn4.delivery.puppetlabs.net:ssh (ESTABLISHED)\njava    16335 root  398u  IPv6         2850171057      0t0        TCP hornet.delivery.puppetlabs.net:51831->ut5ppc1l7rz0ls7.delivery.puppetlabs.net:ssh (ESTABLISHED)\njava    16335 root  402r  IPv6         2850128924      0t0        TCP hornet.delivery.puppetlabs.net:59980->ujdi90qm8d0yi76.delivery.puppetlabs.net:ssh (ESTABLISHED)\njava    16335 root  405r  IPv6         2850165478      0t0        TCP hornet.delivery.puppetlabs.net:59922->yy3lt6o0l8to6s1.delivery.puppetlabs.net:ssh (ESTABLISHED)\n{noformat}\n\nEvery 5 seconds or so, the sockets are closed. I believe ruby is doing this automagically when the thread that opened the sockets finishes. But we shouldn't rely on someone to close the sockets on our behalf.\n\nAlso using {{Timeout.timeout}} is not recommended, because there's no guarantee it will be able to interrupt the blocking {{connect}} syscall. See http://blog.headius.com/2008/02/rubys-threadraise-threadkill-timeoutrb.html. 
A better approach is to use a nonblocking connect, e.g.\n\n{code:ruby}\n  def connect_nonblocking\n    sockaddr = Socket.sockaddr_in(@port, @host)\n\n    socket = Socket.new(Socket::AF_INET, Socket::SOCK_STREAM, 0)\n    begin\n      socket.connect_nonblock(sockaddr)\n    rescue IO::WaitWritable\n      if IO.select(nil, [socket], nil, 5) # timeout in seconds\n        begin\n          socket.connect_nonblock(sockaddr)\n        rescue Errno::EISCONN\n          # connected\n        end\n      else\n        raise Errno::ETIMEDOUT.new\n      end\n    ensure\n      socket.close\n    end\n  end\n{code}", "epicLinkSummary": "All optimization and improvements tickets targeted for 2018", "estimate": "PT0S", "externalId": "10129", "fixedVersions": [], "id": "10129", "issueType": "Bug", "key": "POOLER-56", "labels": [], "originalEstimate": "PT0S", "parent": "10083", "parentSummary": "All optimization and improvements tickets targeted for 2018", "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:91233464-4152-4228-81dd-172d43a52a03", "resolution": "Fixed", "resolutionDate": "2018-04-18T16:35:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "VMPooler doesn't explicitly close TCP sockets", "timeSpent": "PT0S", "updated": "2018-04-18T16:35:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:7306c811-399c-4964-b8cc-74e0ece239a1", "attachments": [], "comments": [{"author": "557058:7306c811-399c-4964-b8cc-74e0ece239a1", "body": "Notes from a pooler improvements meeting that occurred earlier this year:\n\n* dashboard is 4MB of data, takes a long time to load remotely\n* consider changing the infrastructure to use queues rather than multiple threads in a pool (OPS-9779)\n* customers in the wild 
are actually using it, beware of impact to open source\n* quality issues exist around how it manages state in redis\n* when it restarts, vm\u2019s in unknown state, appear to be ready but are not\n* should pooler manage allocator, or move to scheduler?\n* ABS needs to be aware of any pooler changes\n* ensure all changes are thoroughly testing for unforeseen consequences", "created": "2016-12-05T11:39:00.000000"}, {"author": "557058:7306c811-399c-4964-b8cc-74e0ece239a1", "body": "Notes from today's Pit Crew meeting (capturing here for now, but will likely lead to additional tickets for this epic):\n\nFrom [~accountid:63d40628f6e1b543161789a7] (https://trello.com/c/WqMpWrJQ):\n\nMy theory is that we are not using vsphere efficiently to run CI jobs. For example, we preserve hosts on fail, vmpooler loses track of VMs, devs run jobs on-demand and VMs are left running until their 12hr ttl. I wanted to get a sense for what percentage of \"running\" VMs are doing useful work, which I define to be a VM is running and is associated with a running jenkins job. This is the \"ok\" bucket in the dashboard. 
All other running VMs are overhead on the system:\n\nRepo: https://github.com/joshcooper/vmstatus\nJenkins Job: https://jenkins-qe.delivery.puppetlabs.net/job/experimental_vmstatus\nDashboard: http://grafana.ops.puppetlabs.net/dashboard/db/vm-status\n\n^^^ tracked in POOLER-58", "created": "2016-12-20T11:10:00.000000"}, {"author": "557058:7306c811-399c-4964-b8cc-74e0ece239a1", "body": "Moved remaining tickets to the 2018 epic: POOLER-99", "created": "2018-01-03T14:03:00.000000"}], "components": [], "created": "2016-12-05T11:16:00.000000", "creator": "557058:7306c811-399c-4964-b8cc-74e0ece239a1", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Team/s", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiselect", "value": "Quality Engineering"}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@1bf29c07"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "Epic Color", "fieldType": "com.pyxis.greenhopper.jira:gh-epic-color", "value": "ghx-label-8"}, {"fieldName": "Epic Name", "fieldType": "com.pyxis.greenhopper.jira:gh-epic-label", "value": "VMpooler Optimizations 2017"}, {"fieldName": "Epic Status", "fieldType": "com.pyxis.greenhopper.jira:gh-epic-status", "value": "Done"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": 
"com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Issue color", "fieldType": "com.pyxis.greenhopper.jira:jsw-issue-color", "value": "dark_purple"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzajnb:"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_5428_*|*_6_*:*_1_*:*_0_*|*_10010_*:*_1_*:*_34051594503"}], "description": "This epic will track all tickets intended for pooler improvements and bug fixes.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10119", "fixedVersions": [], "id": "10119", "issueType": "Epic", "key": "POOLER-51", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:7306c811-399c-4964-b8cc-74e0ece239a1", "resolution": "Fixed", "resolutionDate": "2018-01-03T14:03:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "All optimization and improvements tickets targeted for 2017", "timeSpent": "PT0S", "updated": "2018-01-03T14:03:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "attachments": [], "comments": [{"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "It also looks like pools are more likely to become depleted after the updates for two reasons:\n# Getting host utilization takes longer than getting its VM count\n# The migration step in _check_pool shares the worker thread and vsphere connection with other 
portions of _check_pool, which is where pools try to refill.\n\nSince we cannot afford more connections the connection management logic really needs to be changed before this should be returned. I'm going to turn off migration at checkout for production vmpooler as a first step.", "created": "2016-12-02T14:28:00.000000"}, {"author": "557058:254cdefb-9532-46cb-b4ac-84c3df97d6af", "body": "[~accountid:557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72] This appears to be happening again as of this morning. Currently blocking our ability to test puppet-agent.\n\n(cc [~accountid:557058:0b16fa29-44bc-4e11-82b0-a346967ad808], [~accountid:557058:b64e0a8c-6f57-44f9-adf1-b0b9767a03da])", "created": "2016-12-03T10:11:00.000000"}, {"author": "557058:7306c811-399c-4964-b8cc-74e0ece239a1", "body": "The issue of pools not refilling appeared to start around 1AM PST. As of a little after 9:30AM PST, pools started to refill. We will continue to monitor the pools throughout the weekend.", "created": "2016-12-03T11:06:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "I ended up reverting changes back to {{8286ec2854bae2c02cdab9da1e7cdb40bd821531}}, which is prior to the migration at checkout changes that I made.\n\nAfter having some time to observe the changes and failures I don't think the changes I made will work with the way that we use vmpooler. Specifically, we use a large number of connections. Some of the changes I made allowed vsphere connections to happen faster, and ask a few more questions about VMs and hosts, that combined result in vsphere inventory service tipping over eventually. In general vmpooler is abusive to vsphere and its inventory service, and the changes I implemented exacerbated this issue. 
When implemented we experienced two inventory service failures and nightly instances of {{connection refused}} being reported during heavy usage periods.\n\nAfter observing these results I think an alternate approach needs to be taken to any further vmpooler changes. What I have in mind is focusing on its connection usage. The biggest issue I see is that expensive operations, such as inspecting the state of hosts in the cluster, are repeated by every clone operation. Additionally, only clone operations are throttled in any way. We should be able to manage all of these operations with a single connection. If multiple connections are indeed advantageous then this state should be managed independently of the number of pools. Vsphere performance increases greatly when it's not being hammered by dozens of connections so there are performance improvements to gain in addition to stability improvements.\n\nIn any case, vmpooler is back to the pre-migration at checkout state, which should allow things to be a little more stable once more.", "created": "2016-12-05T10:07:00.000000"}, {"author": "557058:7306c811-399c-4964-b8cc-74e0ece239a1", "body": "I am closing this ticket as resolved, now that we've return the pooler to its previous state. 
We will reopen this ticket if issues should return, and we'll keep this handy for when we revisit the effort to improve the pooler.", "created": "2016-12-05T11:14:00.000000"}], "components": ["VM Pooler"], "created": "2016-12-02T13:23:00.000000", "creator": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@72e89206"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "Epic Link", "fieldType": "com.pyxis.greenhopper.jira:gh-epic-link", "value": "POOLER-51"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzaixz:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "5.0"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "03/Dec/16"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": 
"1_*:*_1_*:*_2943713_*|*_3_*:*_1_*:*_244148794_*|*_5_*:*_1_*:*_0_*|*_10006_*:*_1_*:*_4337454"}], "description": "I recently implemented some changes around how vmpooler determines where to deploy VMs and a capability to support VM migration at checkout time. These changes have resulted in decreased stability of the vmpooler application, in particular a failure mode that results in pools depleting, and not refilling, during our heaviest usage periods.\n\nAfter encountering the issue after hours multiple times, and witnessing the failure mode, it has become clear that the newly introduced approach to selecting hosts is too impactful on the vsphere inventory service and the vcenter server, which results in connections being refused during a period of high load. When these connections are refused vmpooler attempts to gain new connections, but the failure leaves the vmpooler__tasks__clone queue at its maximum value, so new clone operations are never started, unless the queue is manually cleared, or the application restarted, which also clears the queue.\n\nThis ticket serves to track updating vmpooler to a version that reduces load placed upon the system by disabling migration at checkout, reverting host selection at clone time to a more simple method such as the previous one that used VM count, and/or adding an initial delay upon initialization of vspherehelper to simulate the delay we used to have of re-loading the configuration file for every connection.\n\nIf any of these solutions are not implemented in time, or do not address the issue, we will revert to the previous iteration of vmpooler that was free of these issues (prior to the new host evaluation method) until a more efficient model for tracking host utilization and selection can be implemented.", "epicLinkSummary": "All optimization and improvements tickets targeted for 2017", "estimate": "PT0S", "externalId": "10098", "fixedVersions": [], "id": "10098", "issueType": "Task", "key": "POOLER-50", "labels": [], 
"originalEstimate": "PT0S", "parent": "10119", "parentSummary": "All optimization and improvements tickets targeted for 2017", "priority": "Critical", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "resolution": "Fixed", "resolutionDate": "2016-12-05T11:14:00.000000", "status": "Resolved", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Return vmpooler to stable state", "timeSpent": "PT0S", "updated": "2017-03-04T21:02:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [], "components": ["VM Pooler"], "created": "2016-12-01T14:22:00.000000", "creator": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@2799f79a"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "Epic Link", "fieldType": "com.pyxis.greenhopper.jira:gh-epic-link", "value": "POOLER-99"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk 
Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzai7b:"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_422705769_*|*_5_*:*_1_*:*_0_*|*_10005_*:*_1_*:*_55812997452"}], "description": "check_pool should prioritize user facing operations, such as filling pools, over administrative actions like inspecting vmware for VMs it can find. Unfortunately, the current model is the exact opposite, where clone operations will not begin until a pool has evaluated all other possible states, VM members, and associated queues. This ticket serves to track ripping all of this functionality out of \"check_pool\" and prioritizing these operations in a sane way.", "epicLinkSummary": "All optimization and improvements tickets targeted for 2018", "estimate": "PT0S", "externalId": "10006", "fixedVersions": [], "id": "10006", "issueType": "Improvement", "key": "POOLER-49", "labels": [], "originalEstimate": "PT0S", "parent": "10083", "parentSummary": "All optimization and improvements tickets targeted for 2018", "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "resolution": "Won't Do", "resolutionDate": "2018-09-13T12:24:00.000000", "status": "Resolved", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Check pool should prioritize user facing operations", "timeSpent": "PT0S", "updated": "2018-09-13T12:24:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "attachments": [], "comments": [{"author": 
"557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "A PR is in.", "created": "2016-12-01T10:59:00.000000"}], "components": ["VM Pooler"], "created": "2016-11-30T10:33:00.000000", "creator": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@2630c8b1"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzafsv:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "1.0"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_87918409_*|*_10009_*:*_1_*:*_7971774977_*|*_6_*:*_1_*:*_0"}], "description": "When vmpooler is shut down threads are killed abruptly, and some tasks may be interrupted. When this happens for a migration operation a entry is left in the vmpooler__migration queue. 
This should be cleared when the application is started to prevent stale entries from consuming space in the queue.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10005", "fixedVersions": [], "id": "10005", "issueType": "Improvement", "key": "POOLER-48", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "resolution": "Done", "resolutionDate": "2017-03-03T17:22:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Clear vmpooler__migration at application start time", "timeSpent": "PT0S", "updated": "2017-03-03T17:22:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "attachments": [], "comments": [{"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "Maybe this should just be alert when pools are empty.", "created": "2016-11-30T10:28:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "We should monitor for empty pools, but also ensure the application is resilient to connection failures. PR is in and linked to improve resiliency. Prior iteration did not raise errors from the clone operation, so pool manager would continue to loop, but not succeed.", "created": "2016-12-01T13:43:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "There is another POOLER ticket that describes that sometimes clone operations fail. 
I have witnessed this behavior, and we are back to that version, so this is no longer relevant.", "created": "2016-12-06T10:09:00.000000"}], "components": ["VM Pooler"], "created": "2016-11-28T17:15:00.000000", "creator": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@5850922d"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzae1z:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "2.0"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_253833110_*|*_10009_*:*_1_*:*_411813572_*|*_6_*:*_1_*:*_0"}], "description": "Vmpooler periodically fails during clone operations, and cannot recover. When this happens pools drain and do not replenish until the application is restarted. 
This ticket serves to track resolving this issue by making vmpooler resolve the issues without an application restart, or alert on the error so that a process can have this restart.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10137", "fixedVersions": [], "id": "10137", "issueType": "Bug", "key": "POOLER-47", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "resolution": "Duplicate", "resolutionDate": "2016-12-06T10:09:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Alert when clone operations fail and cannot recover", "timeSpent": "PT0S", "updated": "2016-12-06T10:09:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [{"author": "557058:7306c811-399c-4964-b8cc-74e0ece239a1", "body": "We actually already have this place, and they ship to ESO Noise. Let's use this ticket to see if there are any improvements to be made to our alerts for empty pools. 
I think they need wider visibility under certain conditions.", "created": "2016-12-06T11:48:00.000000"}], "components": ["VM Pooler"], "created": "2016-11-28T16:49:00.000000", "creator": "557058:7306c811-399c-4964-b8cc-74e0ece239a1", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@7805b8dc"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "Epic Link", "fieldType": "com.pyxis.greenhopper.jira:gh-epic-link", "value": "POOLER-51"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Risk Assessment", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Needs Assessment"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzae0v:"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_673154351_*|*_5_*:*_1_*:*_0_*|*_10005_*:*_1_*:*_18845277573"}], "description": "In order to catch events similar to POOLER-41 and POOLER-45 before or customers report them, we need to have alerts enabled any time a pool is drained and does not refill within a reasonable amount of time. 
These alerts should be reported in the ESO Noise hipchat room.", "epicLinkSummary": "All optimization and improvements tickets targeted for 2017", "estimate": "PT0S", "externalId": "10080", "fixedVersions": [], "id": "10080", "issueType": "Improvement", "key": "POOLER-46", "labels": [], "originalEstimate": "PT0S", "parent": "10119", "parentSummary": "All optimization and improvements tickets targeted for 2017", "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:7306c811-399c-4964-b8cc-74e0ece239a1", "resolution": "Fixed", "resolutionDate": "2017-07-12T15:36:00.000000", "status": "Resolved", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Enable alerts when pools do not refill", "timeSpent": "PT0S", "updated": "2017-07-12T15:36:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [{"author": "557058:3a3de6c6-95b1-47b3-a52a-6a6865ff9241", "body": "Just wanted to note that this happened again this morning starting sometime around 0311 PST. Here's a snapshot dashboard showing the problem:\nhttp://grafana.ops.puppetlabs.net/dashboard/snapshot/nmt7B4Q4Cv3AKuJGWgGmKgO4N2vFe5HG\n\nInterestingly, vmpooler-cinext did not experience a similar failure to clone:\nhttp://grafana.ops.puppetlabs.net/dashboard/snapshot/nmt7B4Q4Cv3AKuJGWgGmKgO4N2vFe5HG\n\nIt looks like someone restarted vmpooler around 0530 PST.\n\nI also noticed this:\n\n{code}\n$ ssh vmpooler tail -n 20000 | egrep \"(Connection|connection|MethodFault)\"\n[2016-11-24 03:11:14] [!] [ubuntu-1204-x86_64] 'juf3nepkfids3ok' clone failed with an error: Connection refused - Connection refused\n[2016-11-24 03:11:14] [!] 
[redhat-6-i386] 'fsat3zwcj26t09a' clone failed with an error: Connection refused - Connection refused\n[2016-11-24 03:11:14] [!] [ubuntu-1404-x86_64] 'p22lfwky9iv5wq1' failed while preparing to clone with an error: Connection refused - Connection refused\n[2016-11-24 03:11:14] [!] [ubuntu-1404-x86_64] 'k15v83q2595nifo' failed while preparing to clone with an error: Connection refused - Connection refused\n[2016-11-24 03:11:14] [!] [centos-6-x86_64] 'yhwc7f92iph2wfe' failed while preparing to clone with an error: Connection refused - Connection refused\n[2016-11-24 03:11:14] [!] [sles-11-i386] 'ifd2k0de0g69vir' clone failed with an error: Connection refused - Connection refused\n[2016-11-24 03:11:14] [!] [scientific-5-x86_64] '' failed while preparing to clone with an error: Connection refused - Connection refused\n[2016-11-24 03:11:14] [!] [redhat-7-x86_64] 'nx7zqeas0p0xfpo' clone failed with an error: Connection refused - Connection refused\n[2016-11-24 03:11:14] [!] [ubuntu-1204-x86_64] '' failed while preparing to clone with an error: Connection refused - Connection refused\n[2016-11-24 03:11:14] [x] [ubuntu-1204-i386] 'ykc66ho7sg3gp3w' migration failed with an error: Connection refused - Connection refused\n[2016-11-24 03:11:14] [!] [centos-6-x86_64] 'pw7wgk6m1jtirh7' failed while preparing to clone with an error: Connection refused - Connection refused\n[2016-11-24 03:11:14] [!] [scientific-5-i386] 'vkpjb0cucgjmyej' clone failed with an error: Connection refused - Connection refused\n[2016-11-24 03:11:14] [!] [redhat-5-x86_64] 'p3mmt7k4xewd4yu' clone failed with an error: Connection refused - Connection refused\n[2016-11-24 03:11:14] [!] [redhat-6-i386] '' failed while preparing to clone with an error: Connection refused - Connection refused\n[2016-11-24 03:11:14] [!] [sles-11-i386] '' failed while preparing to clone with an error: Connection refused - Connection refused\n[2016-11-24 03:11:14] [!] 
[ubuntu-1204-x86_64] '' failed while preparing to clone with an error: Connection refused - Connection refused\n[2016-11-24 03:11:14] [!] [scientific-5-i386] '' failed while preparing to clone with an error: Connection refused - Connection refused\n[2016-11-24 03:11:14] [!] [ubuntu-1204-x86_64] '' failed while preparing to clone with an error: Connection refused - Connection refused\n[2016-11-24 03:11:14] [x] [redhat-7-x86_64] 'bswmyyx3opmx7su' migration failed with an error: Connection refused - Connection refused\n{code}\n", "created": "2016-11-24T10:34:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "It looks like the failure affected statsfeeder again as well.\n\nAfter digging, it looks like the problem is due to a change in error reporting that catches an error that used to be raised. The implication of this is that we receive the logged error message, so we get an indication of the error, but the error is not raised, which means the thread stays alive, and tries to continue to reuse the failed connection. Prior to this change the failure would occur silently, and cause the thread to die, which would trigger the method to be called again (I think). I have some changes I'm working on to resolve this [here|https://github.com/puppetlabs/vmpooler/compare/master...mattkirby:send_clone_error_up]. I still don't know why this has started and affecting statsfeeder as well.\n\nPyvmomi provides session information pretty easily under sessionList. We have 379 connections to vcenter as of this writing. A lot of templates connect to vcenter, and I don't know if they all close their connections. 
This could be related to the problem, but I don't know definitively yet.\n\nAnd just like that it's back down to 268 connections.", "created": "2016-11-24T11:28:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "These issues are fixed in current versions.", "created": "2018-04-18T18:06:00.000000"}], "components": ["VM Pooler"], "created": "2016-11-23T05:45:00.000000", "creator": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@53372cf4"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "Epic Link", "fieldType": "com.pyxis.greenhopper.jira:gh-epic-link", "value": "POOLER-99"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzacgv:"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "24/Nov/16"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_1144993353_*|*_6_*:*_1_*:*_0_*|*_10005_*:*_1_*:*_43046217123"}], "description": "When performing clone operations vmpooler started 
encountering errors at 1:31PST:\n{quote}\n[2016-11-23 01:31:47] [!] [redhat-7-x86_64] 'p40zo4h472kzk7d' clone failed with an error: Connection refused - Connection refused\n{quote}\n\nAt the same time in the log it reports a large number of VMs were not found in inventory, and removed from the 'completed' queue.\n\nWhen these errors occurred, clone operations would not resume until the application was restarted.\n\nIt's not clear to me whether the cause was an application failure, a vmware failure, or some combination thereof. OPS-11800 triggered indicating statsfeeder was not updating, and I validated this was the case until it was restarted.\n\nThe increased logging around clone failures is a result of some of the pool manager improvements. It's not clear to me what happened when it failed, and we should continue to monitor for recurrence of the issues.", "epicLinkSummary": "All optimization and improvements tickets targeted for 2018", "estimate": "PT0S", "externalId": "10097", "fixedVersions": [], "id": "10097", "issueType": "Bug", "key": "POOLER-45", "labels": [], "originalEstimate": "PT0S", "parent": "10083", "parentSummary": "All optimization and improvements tickets targeted for 2018", "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "resolution": "Fixed", "resolutionDate": "2018-04-18T18:06:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Clone operations failed until a restart", "timeSpent": "PT0S", "updated": "2018-04-18T18:06:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "attachments": [], "comments": [{"author": "557058:3a3de6c6-95b1-47b3-a52a-6a6865ff9241", "body": "So in 
addition to the {{vmpooler.migrate.<template>}} endpoint that I described above I think there are two other metrics endpoints we should consider including when fixing this issue:\n\n* {{vmpooler.migrate_from.<vsphere_hostname>}}\nThis endpoint would keep count of VMs migrated from the named vsphere host (eg opdx-a2-chassis6-8.ops.puppetlabs.net)\n* {{vmpooler.migrate_to.<vsphere_hostname>}}\nThis endpoint would keep count of VMs migrated to the named vsphere host (eg opdx-a2-chassis6-8.ops.puppetlabs.net)\n\nThe advantage of these metrics is that they would let us correlate migration events with changes in host CPU ready time as seen in this graph:\nhttp://grafana.ops.puppetlabs.net/dashboard/db/vmware-host-cpu-ready?panelId=2&fullscreen\n\nLet me know if it would be better to file a separate ticket for these proposed new metrics.", "created": "2016-11-22T14:52:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "This is up and running on vmpooler-provisioner-dev-2 and reporting metrics to graphite-test. 
Feel free to have a look there to confirm it look good to you.", "created": "2016-11-23T12:37:00.000000"}], "components": ["VM Pooler"], "created": "2016-11-22T14:35:00.000000", "creator": "557058:3a3de6c6-95b1-47b3-a52a-6a6865ff9241", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@3bfa2936"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hzac6n:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "2.0"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "23/Nov/16"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_252360_*|*_10007_*:*_1_*:*_423260195_*|*_3_*:*_1_*:*_78992188_*|*_10009_*:*_1_*:*_85219666_*|*_5_*:*_1_*:*_0"}], "description": "After [~accountid:557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72] deployed the vmpooler migrate at checkout time feature to the production vmpooler I wanted to update the {{vmpooler-dynamic-ready}} graph to show 
migrations in addition to {{running}}, {{clone}}, and {{destroy}} statistics. The dashboard I am talking about is here: \n\nhttp://grafana.ops.puppetlabs.net/dashboard/db/vmpooler-ready-dynamic\n\nWhen I attempted to add the {{vmpooler.migrate.<template_name>.count}} metric to the graphs in this dashboard I found that instead of {{<template_name>}} the metric recorded at this endpoint is VM name. For example instead of finding {{vmpooler.migrate.redhat-7-x86_64.count}} as I would expect based on established metrics like {{vmpooler.clone.redhat-7-x86_64.count}}, I found {{vmpooler.migrate.b5zbgcmksjngnyz.count}}. ", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10118", "fixedVersions": [], "id": "10118", "issueType": "Bug", "key": "POOLER-44", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Major", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:3a3de6c6-95b1-47b3-a52a-6a6865ff9241", "resolution": "Fixed", "resolutionDate": "2016-11-29T09:51:00.000000", "status": "Resolved", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "'vmpooler.migrate.<template_name>' metric appears to be reporting individual VM names rather than template names", "timeSpent": "PT0S", "updated": "2016-11-29T09:51:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [{"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "I have made some good progress here. Specifically, my tests are now validating the results of having run the migrate_vm method from within the rspec test with the vsphere interactions mocked. 
To accomplish this required restructuring pool manager so it no longer expects to find a global object containing the vsphere connection, and instead expects the connection to be passed to the methods using it. This makes the flow of connections more clear, and importantly, possible to test without a real vsphere connection.\n\nTo accomplish this did require some more changes to the vmpooler application. My plan here is to limit my testing to the migration at checkout portion of pool manager (and any other components that require test additions) and to refrain from adding test for vsphere helper at this time. I did also make changes to vsphere helper that will make it possible to build tests for it, without a bunch of hackery, by fixing the way it loads its configuration.\n\nI now need to clean up my branch and get review on the changes.", "created": "2016-11-16T10:27:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "These tests are implemented in my branch, which I believe is ready for merge pending any additional review.", "created": "2016-11-17T11:34:00.000000"}], "components": ["VM Pooler"], "created": "2016-11-10T10:30:00.000000", "creator": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@5abc9605"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": 
"com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hymkzz:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "3.0"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_83932_*|*_10007_*:*_1_*:*_14339_*|*_3_*:*_1_*:*_527699911_*|*_10009_*:*_1_*:*_80802156_*|*_6_*:*_1_*:*_0"}], "description": "In order to provide confidence in the changes submitted for merge in [pr #167 to vmpooler|https://github.com/puppetlabs/vmpooler/pull/167] I need to implement tests around the functionality. The current tests for pool manager don't test much, and so my goal is to implement tests that validate functionality more so than existing tests, to follow the example of tests such as this [vm_template_spec.rb|https://github.com/puppetlabs/vmpooler/blob/master/spec/vmpooler/api/v1/vm_template_spec.rb#L183] test which is more stateful. 
This ticket serves to track implementing tests sufficient to validate the new functionality, and conform to the standards maintained on new vmpooler functionality.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10033", "fixedVersions": [], "id": "10033", "issueType": "Task", "key": "POOLER-43", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "resolution": "Done", "resolutionDate": "2016-11-17T11:34:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Implement tests validating migrate at checkout additions to vmpooler", "timeSpent": "PT0S", "updated": "2016-11-17T11:34:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "attachments": [], "comments": [{"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "I've configured log rotation as a part of the pooler_latest branch, the changes for which are landing 8/2.", "created": "2017-08-01T14:08:00.000000"}], "components": ["VM Pooler"], "created": "2016-11-08T17:39:00.000000", "creator": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@621ad16"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": 
"com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "Epic Link", "fieldType": "com.pyxis.greenhopper.jira:gh-epic-link", "value": "POOLER-51"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hza0v3:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "1.0"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_2398262415_*|*_6_*:*_1_*:*_0_*|*_10005_*:*_1_*:*_20567881971"}], "description": "Vmpooler.log gets very large. 
We should rotate its logfile periodically.", "epicLinkSummary": "All optimization and improvements tickets targeted for 2017", "estimate": "PT0S", "externalId": "10117", "fixedVersions": [], "id": "10117", "issueType": "Improvement", "key": "POOLER-42", "labels": [], "originalEstimate": "PT0S", "parent": "10119", "parentSummary": "All optimization and improvements tickets targeted for 2017", "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "resolution": "Done", "resolutionDate": "2017-08-01T14:08:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Implement log rotation for vmpooler.log", "timeSpent": "PT0S", "updated": "2017-08-01T14:08:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [{"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "Looking at the sles11 VMs they are moving from pending to ready in ~3 minutes, which is a reasonable amount of time. I see several VMs becoming ready just after 3:22am for sles-11. Based on what I see I think that it makes sense to increase the size of the pool, and I think that will likely alleviate these issues. I have submitted [pr #6578|https://github.com/puppetlabs/puppetlabs-modules/pull/6578] to update the size of sles-11 and 12 pools.\n\nOne improvement we could make to vmpooler is to prioritize moving pools out of empty over getting a pool filled. 
Sometimes it will spend its effort bringing a large pool from 40 to 60 VMs, while a few small pools sit empty waiting their turn.", "created": "2016-11-08T18:30:00.000000"}, {"author": "557058:7306c811-399c-4964-b8cc-74e0ece239a1", "body": "Thanks, [~accountid:557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72]!", "created": "2016-11-09T15:43:00.000000"}], "components": ["VM Pooler"], "created": "2016-11-08T10:58:00.000000", "creator": "557058:7306c811-399c-4964-b8cc-74e0ece239a1", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@5adf7d99"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hza0dz:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "2.0"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "09/Nov/16"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": 
"1_*:*_1_*:*_9646_*|*_10007_*:*_1_*:*_20799044_*|*_10009_*:*_1_*:*_55398942_*|*_5_*:*_1_*:*_0_*|*_10006_*:*_1_*:*_27277249"}], "description": "Integration is experiencing failures believed to be caused by drained sles pools. \n\nSpecifically:\njob link:\u2002\u2002https://jenkins-enterprise.delivery.puppetlabs.net/job/enterprise_pe-acceptance-tests_integration-sy...\nsles11:\u2002\u20023:22am\nsles12:\u2002\u20023:30am \n\nNo failures were encountered in last night's run, so this appears to be intermittent. \n\nFor this ticket, please check to see if the bootstrap process for these templates are taking longer than expected, and consider increasing these pool sizes by 5 (?). They are both currently capped at 15.\n\nCC [~accountid:557058:720f602d-ed4c-4553-a2cf-8e7dc40c0739]", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10160", "fixedVersions": [], "id": "10160", "issueType": "Bug", "key": "POOLER-41", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:7306c811-399c-4964-b8cc-74e0ece239a1", "resolution": "Fixed", "resolutionDate": "2016-11-09T15:43:00.000000", "status": "Resolved", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "sles-11-x86_64 and sles-12-x86_64 pools are draining under heavy load, causing tests to fail", "timeSpent": "PT0S", "updated": "2016-11-09T15:43:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [{"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "This shouldn't happen anymore due to changes in find VM.", "created": "2018-07-03T10:08:00.000000"}], "components": ["VM Pooler"], "created": "2016-11-03T10:34:00.000000", "creator": 
"557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@30e5b7fd"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hz9xtz:"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_2859376555_*|*_10009_*:*_1_*:*_625013741_*|*_6_*:*_1_*:*_0_*|*_10005_*:*_1_*:*_49592018502"}], "description": "When a folder is created for a template within another folder intended for deploys vmpooler thinks it has discovered a VM with the template title. For example, in a folder for redhat-7-x86_64 VMs, if I accidentally create a folder called redhat-4-i386, vmpooler discovers a VM called redhat-4-i386 that it didn't know about, and then destroys it. Unfortunately, what vmpooler thinks was a discovered VM of that title is actually a folder intended to store creates VMs that matches the title of the source template, which is then destroyed. 
When this happens any source template for the pool causes new VM deploys to fail.\n\nThe application should inspect an object that is discovered to validate it is a VM before taking any action.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10004", "fixedVersions": [], "id": "10004", "issueType": "Bug", "key": "POOLER-40", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "resolution": "Fixed", "resolutionDate": "2018-07-10T18:01:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Folders are treated as VMs when within VM target folders", "timeSpent": "PT0S", "updated": "2018-07-10T18:01:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "attachments": [], "comments": [{"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "Thanks [~accountid:557058:3a3de6c6-95b1-47b3-a52a-6a6865ff9241], I'm working through the templates now.", "created": "2016-10-27T15:16:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "In testing cisco-wrlinux-5-x86_64 I find that I cannot get the hostname with {{vmtoolsd --cmd \"info-get guestinfo.name\"}}. I'll need to figure out a alternate reliable way to get this information.", "created": "2016-10-31T11:14:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "It turns out that the {{eso-template@vsphere.local}} user password was no longer accepted. I set the password to the baked-in value we have been using and can now login using it. I am watching to see if this allows pools to refill. 
I don't know why this was no longer accepted.", "created": "2016-10-31T16:24:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "The Cisco wrlinux template hostname lookup method expects there to be no more than a single cisco wrlinux folder in the vmware inventory. This is why the pool cannot move VMs to the ready state in vmpooler-cinext. This method needs to be updated to be less terrible. The vmtoolsd guestinfo method is preferable, but this information is not actually populated for these platforms so we would have to populate it post-deploy and verify it works as expected.\n\nAn alternate idea for consideration is to simplify this model. It's bad for VMs to make a connection to vsphere directly because vsphere connections do not scale well and inventory operations are expensive (more connections = bad for vmware). If vmpooler had an API endpoint that returned a VM hostname based on a piece of identifying data, such as its current IP address, then all platforms could query this API endpoint to retrieve their hostname and allow vmpooler to handle the lookup operation. Moving away from vmpooler would only require we maintain something to provide this capability. The benefit is that all of this can be done with a single vsphere connection instead of multiples.\n\nIn the short term I will update the method used to be less fragile and a little more concise. I hope to have an update later today.", "created": "2016-11-01T10:28:00.000000"}, {"author": "557058:3a3de6c6-95b1-47b3-a52a-6a6865ff9241", "body": "bq. If vmpooler had an API endpoint that returned a VM hostname based on a piece of identifying data, such as its current IP address, then all platforms could query this API endpoint to retrieve their hostname and allow vmpooler to handle the lookup operation.\n\n+1 on this, we should file a ticket for this work\n\nbq. In the short term I will update the method used to be less fragile and a little more concise. 
I hope to have an update later today.\n\nhooray!", "created": "2016-11-01T11:34:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "Refactoring was necessary to fix the wrlinux stuff. They were mostly the same between 5/7 so I unified them and submitted a pull request, [#15|https://github.com/puppetlabs/pooler-template-bootstrap/pull/15/files]. This is what is being used to deploy both templates now.", "created": "2016-11-02T20:47:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "Everything enters the ready state now. PRs are submitted for template changes and awaiting review.", "created": "2016-11-04T14:22:00.000000"}], "components": ["VM Pooler"], "created": "2016-10-27T14:56:00.000000", "creator": "557058:3a3de6c6-95b1-47b3-a52a-6a6865ff9241", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@1d12e7b4"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hz9tdr:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "2.0"}, {"fieldName": 
"[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "27/Oct/16"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_1187629_*|*_3_*:*_1_*:*_687959500_*|*_6_*:*_1_*:*_0"}], "description": "The following pools are failing to enter the 'ready' state on vmpooler-cinext:\n\nhttp://grafana.ops.puppetlabs.net/dashboard/db/vmpooler-ready-dynamic-cinext\n\n* cisco-wrlinux-5-x86_64 (/)\n* cisco-wrlinux-7-x86_64 (/)\n* centos-4-i386 (/)\n* redhat-4-i386 (/)\n\nThe following pools are failing to enter the 'ready' state on production vmpooler:\n\nhttp://grafana.ops.puppetlabs.net/dashboard/db/vmpooler-ready-dynamic\n\n* redhat-4-x86_64 (/)\n* solaris-10-u8-x86_64 (/)\n\nIt's also worth pointing out that each of these pools exhibits a similar clone-and-destroy pattern in their respective state dashboards.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10054", "fixedVersions": [], "id": "10054", "issueType": "Bug", "key": "POOLER-39", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:3a3de6c6-95b1-47b3-a52a-6a6865ff9241", "resolution": "Fixed", "resolutionDate": "2016-11-04T14:22:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Some vmpooler-cinext and production vmpooler pools are failing to enter 'ready' state", "timeSpent": "PT0S", "updated": "2016-11-04T14:22:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "attachments": [], "comments": [{"author": "557058:7306c811-399c-4964-b8cc-74e0ece239a1", "body": "Also, 
redhat-4-x86_64 drained around 7:00AM and has not refilled. All other pools look OK.\n\nhttp://grafana.ops.puppetlabs.net/dashboard/db/vmpooler-ready-dynamic", "created": "2016-10-27T10:03:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "It looks like the Solaris cloned VMs are exhibiting symptoms like those we saw when centos-4-x86_64 templates stopped filling the pool. Specifically, VMs are being deployed, but are not properly setting hostname, and as a result are sending something like localhost.localdomain to the DHCP server. Since we rely upon dynamic DNS in order to access provisioned machines this means they never get past the \"pending/booting\" state in vmpooler as to get past that state requires connecting on port 22 to the DNS short name.", "created": "2016-10-27T10:23:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "Looking into this further it does look like the same issue as the centos-4-x86_64 problem we had recently. I am not sure why the behavior of the bootstrap process has recently stopped working, or changed, but I recall seeing some discussion that the method we used to use for this is no longer preferred. With that said I could not find how to get this information from vmtools on the guest VM directly.\n\nThis pull request is the change that was made for [centos-4|https://github.com/puppetlabs/pooler-template-bootstrap/pull/14]. There is some discussion there questioning why the results of these API actions have changed, and I don't know the answer to that. I can attempt to dig into it further. The short story appears to be that they do not consistently return the results we expect.\n\nI am not sure where to place these files when updating, so I am not going to attempt to fix this in the linked repository and template unless I'm asked, as it's an easter egg hunt for me. 
I am happy to pair with someone, or work from documentation to update what is there and the template, I'm just not familiar enough with our process now to do so with any confidence. I believe [this file|https://github.com/puppetlabs/pooler-template-bootstrap/blob/master/solaris-10/vcloud-bootstrap.py] is what will require an update, then it will need to be deployed to the template and tested.\n\n[~accountid:557058:7306c811-399c-4964-b8cc-74e0ece239a1], can you please help me determine whether someone can help resolve the bootstrap issue, or if I should work on it/pair with someone?", "created": "2016-10-27T10:48:00.000000"}, {"author": "557058:7306c811-399c-4964-b8cc-74e0ece239a1", "body": "[~accountid:557058:7ed56b44-735e-4cdb-b168-470ef48d5112], I'm not sure we'll be able to figure this one out. Our pooler/template experts are focused on puppet agent 1.8.0. Can this wait until next week?", "created": "2016-10-27T12:26:00.000000"}, {"author": "557058:3bbf1c83-df0e-4372-887d-cfc38dee9330", "body": "If the redhat-4-x86_64 pool is empty, I believe that will impact nightly PE testing.  \n\nAre the redhat-4-x86_64 clones hanging while running the vcloud bootstrap script? \n\n/cc [~accountid:557058:ab1874a9-45ab-4efc-91aa-5200c165b2c4] [~accountid:557058:b3e6b0df-198d-45f8-9ba5-d41f90e80ced]", "created": "2016-10-27T13:59:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "[~accountid:557058:3bbf1c83-df0e-4372-887d-cfc38dee9330], it looks like it's the same issue affecting both pools. The hostname for redhat-4-x86_64 cloned VMs is not getting set at clone time.", "created": "2016-10-27T14:09:00.000000"}, {"author": "557058:7ed56b44-735e-4cdb-b168-470ef48d5112", "body": "The solaris-10 work I'm doing is not critical to an imminent release, so this can wait until next week for me. 
", "created": "2016-10-27T21:16:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "After digging further I think this is because the {{eso-template@vsphere.local}} user password is no longer accepted.", "created": "2016-10-31T15:57:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "I fixed the bootstrapping script and deployed it to the template.\n\nThe problem is that the lookup method assumed there would be only one folder matching the title of the VM. I have updated this to instead use a method in pyvmomi to search by IP and return the VM name. A pull request is in and more detail is available in POOLER-39.", "created": "2016-11-04T13:32:00.000000"}], "components": ["VM Pooler"], "created": "2016-10-27T10:01:00.000000", "creator": "557058:7306c811-399c-4964-b8cc-74e0ece239a1", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@18d4a44e"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hymmpb:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", 
"value": "3.0"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "27/Oct/16"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_727641_*|*_3_*:*_1_*:*_703160890_*|*_6_*:*_1_*:*_0"}], "description": "Around 4AM this morning, the solaris-10-u8-x86_64 pool drained and did not refill. \n\nSee SRE hipchat channel for more details, starting at 8:26AM (10/27/16).\n\nCC [~accountid:557058:3a3de6c6-95b1-47b3-a52a-6a6865ff9241] [~accountid:557058:7ed56b44-735e-4cdb-b168-470ef48d5112]", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10128", "fixedVersions": [], "id": "10128", "issueType": "Bug", "key": "POOLER-38", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:7306c811-399c-4964-b8cc-74e0ece239a1", "resolution": "Fixed", "resolutionDate": "2016-11-04T13:32:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Empty solaris-10-u8-x86_64 pool", "timeSpent": "PT0S", "updated": "2016-11-04T13:32:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:9479147e-472e-492f-9d5f-2788ee2dd8d0", "attachments": [], "comments": [{"author": "63d40635a05386069cdb69d6", "body": "I'm unclear exactly what this ticket is asking for. This document exists, but maybe it doesn't cover what this ticket is supposed to cover? 
[https://github.com/puppetlabs/vmpooler/blob/master/docs/API.md]\u00a0", "created": "2018-04-23T13:19:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "I think this ticket is asking for a list of expected http responses from specific vmpooler API endpoints, like what was added for /config/poolsize and some other recent additions. My thought is that by documenting this behavior the clients others are building should work consistently. Also, it should make it more straightforward to communicate changes in any particular endpoint.", "created": "2018-10-03T13:13:00.000000"}], "components": ["VM Pooler"], "created": "2016-10-21T10:29:00.000000", "creator": "70121:ef2e2611-7893-499f-94ad-b7b2245beace", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@4732575c"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "Epic Link", "fieldType": "com.pyxis.greenhopper.jira:gh-epic-link", "value": "POOLER-99"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hykm9j:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": 
"3.0"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "23/Apr/18"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_3982933158_*|*_10007_*:*_1_*:*_2845000_*|*_3_*:*_1_*:*_82148215_*|*_5_*:*_2_*:*_42822_*|*_10006_*:*_2_*:*_8893969604_*|*_10005_*:*_1_*:*_50453267457"}], "description": "As a consumer of the vmpooler API, it would be extremely nice if the documentation contained possible HTTP responses for each endpoint.", "epicLinkSummary": "All optimization and improvements tickets targeted for 2018", "estimate": "PT0S", "externalId": "10051", "fixedVersions": [], "id": "10051", "issueType": "Improvement", "key": "POOLER-37", "labels": [], "originalEstimate": "PT0S", "parent": "10083", "parentSummary": "All optimization and improvements tickets targeted for 2018", "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "70121:ef2e2611-7893-499f-94ad-b7b2245beace", "resolution": "Fixed", "resolutionDate": "2018-10-25T09:49:00.000000", "status": "Resolved", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Document all possible HTTP responses for API", "timeSpent": "PT0S", "updated": "2018-10-25T09:49:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "attachments": [], "comments": [{"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "Key/value pairs were moved into the completed queue for centos-4-x86_64, which is what caused the failure. I removed these and deleted the VMs. 
The status API now returns.", "created": "2016-10-21T00:55:00.000000"}], "components": [], "created": "2016-10-21T00:03:00.000000", "creator": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@1b757203"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hz9p6v:"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_32449486_*|*_6_*:*_1_*:*_0"}], "description": "Production vmpooler is returning a backtrace from the status API. 
It looks to be the result of redis keys storing values the API is not expecting.\n\n{quote}\nRedis::CommandError: ERR Operation against a key holding the wrong kind of value\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/redis-3.1.0/lib/redis/client.rb:103:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/redis-3.1.0/lib/redis.rb:1213:in `scard'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/redis-3.1.0/lib/redis.rb:37:in `synchronize'\n\t/usr/local/share/pl-jruby/lib/ruby/1.9/monitor.rb:211:in `mon_synchronize'\n\t/usr/local/share/pl-jruby/lib/ruby/1.9/monitor.rb:210:in `mon_synchronize'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/redis-3.1.0/lib/redis.rb:37:in `synchronize'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/redis-3.1.0/lib/redis.rb:1212:in `scard'\n\t/var/lib/vmpooler/lib/vmpooler/api/helpers.rb:163:in `get_queue_metrics'\n\torg/jruby/RubyArray.java:1613:in `each'\n\t/var/lib/vmpooler/lib/vmpooler/api/helpers.rb:159:in `get_queue_metrics'\n\t/var/lib/vmpooler/lib/vmpooler/api/v1.rb:133:in `HEAD /api/v1/status/?'\n\torg/jruby/RubyMethod.java:124:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:1603:in `compile!'\n\torg/jruby/RubyProc.java:271:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:966:in `route!'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:985:in `route_eval'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:966:in `route!'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:1006:in `process_route'\n\torg/jruby/RubyKernel.java:1284:in `catch'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:1004:in `process_route'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:964:in 
`route!'\n\torg/jruby/RubyArray.java:1613:in `each'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:963:in `route!'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:1076:in `dispatch!'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:1058:in `invoke'\n\torg/jruby/RubyKernel.java:1284:in `catch'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:1058:in `invoke'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:1073:in `dispatch!'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:898:in `call!'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:1058:in `invoke'\n\torg/jruby/RubyKernel.java:1284:in `catch'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:1058:in `invoke'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:898:in `call!'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:886:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/xss_header.rb:18:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/path_traversal.rb:16:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/json_csrf.rb:18:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/base.rb:49:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/base.rb:49:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/frame_options.rb:31:in 
`call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-1.5.2/lib/rack/nulllogger.rb:9:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-1.5.2/lib/rack/head.rb:11:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/show_exceptions.rb:21:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:180:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:2014:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:945:in `forward'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:1019:in `route_missing'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:980:in `route!'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:976:in `route!'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:1076:in `dispatch!'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:1058:in `invoke'\n\torg/jruby/RubyKernel.java:1284:in `catch'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:1058:in `invoke'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:1073:in `dispatch!'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:898:in `call!'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:1058:in `invoke'\n\torg/jruby/RubyKernel.java:1284:in `catch'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:1058:in `invoke'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:898:in `call!'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:886:in 
`call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/xss_header.rb:18:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/path_traversal.rb:16:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/json_csrf.rb:18:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/base.rb:49:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/base.rb:49:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/frame_options.rb:31:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-1.5.2/lib/rack/nulllogger.rb:9:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-1.5.2/lib/rack/head.rb:11:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/show_exceptions.rb:21:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:180:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:2014:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:945:in `forward'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:1019:in `route_missing'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:980:in `route!'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:976:in `route!'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:1076:in `dispatch!'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:1058:in `invoke'\n\torg/jruby/RubyKernel.java:1284:in 
`catch'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:1058:in `invoke'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:1073:in `dispatch!'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:898:in `call!'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:1058:in `invoke'\n\torg/jruby/RubyKernel.java:1284:in `catch'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:1058:in `invoke'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:898:in `call!'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:886:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/xss_header.rb:18:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/path_traversal.rb:16:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/json_csrf.rb:18:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/base.rb:49:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/base.rb:49:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/frame_options.rb:31:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-1.5.2/lib/rack/nulllogger.rb:9:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-1.5.2/lib/rack/head.rb:11:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/show_exceptions.rb:21:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:180:in 
`call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:2014:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:945:in `forward'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:1019:in `route_missing'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:980:in `route!'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:976:in `route!'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:1076:in `dispatch!'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:1058:in `invoke'\n\torg/jruby/RubyKernel.java:1284:in `catch'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:1058:in `invoke'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:1073:in `dispatch!'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:898:in `call!'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:1058:in `invoke'\n\torg/jruby/RubyKernel.java:1284:in `catch'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:1058:in `invoke'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:898:in `call!'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:886:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/xss_header.rb:18:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/path_traversal.rb:16:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/json_csrf.rb:18:in 
`call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/base.rb:49:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/base.rb:49:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/frame_options.rb:31:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-1.5.2/lib/rack/nulllogger.rb:9:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-1.5.2/lib/rack/head.rb:11:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/show_exceptions.rb:21:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:180:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:2014:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/xss_header.rb:18:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/path_traversal.rb:16:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/json_csrf.rb:18:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/base.rb:49:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/base.rb:49:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/frame_options.rb:31:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-1.5.2/lib/rack/nulllogger.rb:9:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-1.5.2/lib/rack/head.rb:11:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:180:in 
`call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:2014:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:1478:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:1788:in `synchronize'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:1478:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/puma-2.9.0-java/lib/puma/server.rb:490:in `handle_request'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/puma-2.9.0-java/lib/puma/server.rb:488:in `handle_request'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/puma-2.9.0-java/lib/puma/server.rb:361:in `process_client'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/puma-2.9.0-java/lib/puma/server.rb:357:in `process_client'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/puma-2.9.0-java/lib/puma/server.rb:254:in `run'\n\torg/jruby/RubyProc.java:271:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/puma-2.9.0-java/lib/puma/thread_pool.rb:92:in `spawn_thread'%                                                            \u279c  ~ curl -s http://vmpooler.delivery.puppetlabs.net/api/v1/status\nRedis::CommandError: ERR Operation against a key holding the wrong kind of value\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/redis-3.1.0/lib/redis/client.rb:103:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/redis-3.1.0/lib/redis.rb:1213:in `scard'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/redis-3.1.0/lib/redis.rb:37:in `synchronize'\n\t/usr/local/share/pl-jruby/lib/ruby/1.9/monitor.rb:211:in `mon_synchronize'\n\t/usr/local/share/pl-jruby/lib/ruby/1.9/monitor.rb:210:in `mon_synchronize'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/redis-3.1.0/lib/redis.rb:37:in 
`synchronize'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/redis-3.1.0/lib/redis.rb:1212:in `scard'\n\t/var/lib/vmpooler/lib/vmpooler/api/helpers.rb:163:in `get_queue_metrics'\n\torg/jruby/RubyArray.java:1613:in `each'\n\t/var/lib/vmpooler/lib/vmpooler/api/helpers.rb:159:in `get_queue_metrics'\n\t/var/lib/vmpooler/lib/vmpooler/api/v1.rb:133:in `HEAD /api/v1/status/?'\n\torg/jruby/RubyMethod.java:124:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:1603:in `compile!'\n\torg/jruby/RubyProc.java:271:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:966:in `route!'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:985:in `route_eval'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:966:in `route!'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:1006:in `process_route'\n\torg/jruby/RubyKernel.java:1284:in `catch'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:1004:in `process_route'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:964:in `route!'\n\torg/jruby/RubyArray.java:1613:in `each'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:963:in `route!'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:1076:in `dispatch!'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:1058:in `invoke'\n\torg/jruby/RubyKernel.java:1284:in `catch'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:1058:in `invoke'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:1073:in `dispatch!'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:898:in 
`call!'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:1058:in `invoke'\n\torg/jruby/RubyKernel.java:1284:in `catch'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:1058:in `invoke'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:898:in `call!'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:886:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/xss_header.rb:18:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/path_traversal.rb:16:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/json_csrf.rb:18:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/base.rb:49:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/base.rb:49:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/frame_options.rb:31:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-1.5.2/lib/rack/nulllogger.rb:9:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-1.5.2/lib/rack/head.rb:11:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/show_exceptions.rb:21:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:180:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:2014:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:945:in `forward'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:1019:in 
`route_missing'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:980:in `route!'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:976:in `route!'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:1076:in `dispatch!'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:1058:in `invoke'\n\torg/jruby/RubyKernel.java:1284:in `catch'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:1058:in `invoke'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:1073:in `dispatch!'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:898:in `call!'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:1058:in `invoke'\n\torg/jruby/RubyKernel.java:1284:in `catch'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:1058:in `invoke'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:898:in `call!'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:886:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/xss_header.rb:18:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/path_traversal.rb:16:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/json_csrf.rb:18:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/base.rb:49:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/base.rb:49:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/frame_options.rb:31:in 
`call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-1.5.2/lib/rack/nulllogger.rb:9:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-1.5.2/lib/rack/head.rb:11:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/show_exceptions.rb:21:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:180:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:2014:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:945:in `forward'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:1019:in `route_missing'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:980:in `route!'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:976:in `route!'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:1076:in `dispatch!'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:1058:in `invoke'\n\torg/jruby/RubyKernel.java:1284:in `catch'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:1058:in `invoke'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:1073:in `dispatch!'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:898:in `call!'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:1058:in `invoke'\n\torg/jruby/RubyKernel.java:1284:in `catch'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:1058:in `invoke'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:898:in `call!'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:886:in 
`call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/xss_header.rb:18:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/path_traversal.rb:16:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/json_csrf.rb:18:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/base.rb:49:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/base.rb:49:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/frame_options.rb:31:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-1.5.2/lib/rack/nulllogger.rb:9:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-1.5.2/lib/rack/head.rb:11:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/show_exceptions.rb:21:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:180:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:2014:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:945:in `forward'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:1019:in `route_missing'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:980:in `route!'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:976:in `route!'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:1076:in `dispatch!'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:1058:in `invoke'\n\torg/jruby/RubyKernel.java:1284:in 
`catch'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:1058:in `invoke'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:1073:in `dispatch!'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:898:in `call!'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:1058:in `invoke'\n\torg/jruby/RubyKernel.java:1284:in `catch'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:1058:in `invoke'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:898:in `call!'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:886:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/xss_header.rb:18:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/path_traversal.rb:16:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/json_csrf.rb:18:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/base.rb:49:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/base.rb:49:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/frame_options.rb:31:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-1.5.2/lib/rack/nulllogger.rb:9:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-1.5.2/lib/rack/head.rb:11:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/show_exceptions.rb:21:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:180:in 
`call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:2014:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/xss_header.rb:18:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/path_traversal.rb:16:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/json_csrf.rb:18:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/base.rb:49:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/base.rb:49:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/frame_options.rb:31:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-1.5.2/lib/rack/nulllogger.rb:9:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-1.5.2/lib/rack/head.rb:11:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:180:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:2014:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:1478:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:1788:in `synchronize'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:1478:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/puma-2.9.0-java/lib/puma/server.rb:490:in `handle_request'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/puma-2.9.0-java/lib/puma/server.rb:488:in `handle_request'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/puma-2.9.0-java/lib/puma/server.rb:361:in 
`process_client'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/puma-2.9.0-java/lib/puma/server.rb:357:in `process_client'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/puma-2.9.0-java/lib/puma/server.rb:254:in `run'\n\torg/jruby/RubyProc.java:271:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/puma-2.9.0-java/lib/puma/thread_pool.rb:92:in `spawn_thread'%                                                            \u279c  ~ curl -s http://vmpooler.delivery.puppetlabs.net/api/v1/status\nRedis::CommandError: ERR Operation against a key holding the wrong kind of value\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/redis-3.1.0/lib/redis/client.rb:103:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/redis-3.1.0/lib/redis.rb:1213:in `scard'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/redis-3.1.0/lib/redis.rb:37:in `synchronize'\n\t/usr/local/share/pl-jruby/lib/ruby/1.9/monitor.rb:211:in `mon_synchronize'\n\t/usr/local/share/pl-jruby/lib/ruby/1.9/monitor.rb:210:in `mon_synchronize'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/redis-3.1.0/lib/redis.rb:37:in `synchronize'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/redis-3.1.0/lib/redis.rb:1212:in `scard'\n\t/var/lib/vmpooler/lib/vmpooler/api/helpers.rb:163:in `get_queue_metrics'\n\torg/jruby/RubyArray.java:1613:in `each'\n\t/var/lib/vmpooler/lib/vmpooler/api/helpers.rb:159:in `get_queue_metrics'\n\t/var/lib/vmpooler/lib/vmpooler/api/v1.rb:133:in `HEAD /api/v1/status/?'\n\torg/jruby/RubyMethod.java:124:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:1603:in `compile!'\n\torg/jruby/RubyProc.java:271:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:966:in `route!'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:985:in 
`route_eval'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:966:in `route!'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:1006:in `process_route'\n\torg/jruby/RubyKernel.java:1284:in `catch'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:1004:in `process_route'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:964:in `route!'\n\torg/jruby/RubyArray.java:1613:in `each'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:963:in `route!'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:1076:in `dispatch!'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:1058:in `invoke'\n\torg/jruby/RubyKernel.java:1284:in `catch'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:1058:in `invoke'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:1073:in `dispatch!'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:898:in `call!'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:1058:in `invoke'\n\torg/jruby/RubyKernel.java:1284:in `catch'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:1058:in `invoke'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:898:in `call!'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/sinatra-1.4.5/lib/sinatra/base.rb:886:in `call'\n\t/usr/local/share/pl-jruby/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/xss_header.rb:18:in", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10096", "fixedVersions": [], "id": "10096", "issueType": "Bug", "key": "POOLER-36", "labels": [], "originalEstimate": "PT0S", "parent": 
null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "resolution": "Fixed", "resolutionDate": "2016-10-21T09:04:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Status API returns redis backtrace", "timeSpent": "PT0S", "updated": "2016-10-21T09:04:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [{"author": "70121:ef2e2611-7893-499f-94ad-b7b2245beace", "body": "This can probably be closed. After looking at the source code, it looks like vmpooler generates a snapshot name, returns that to the user, and then places the host and snapshot name on a queue to be snapshot later. So it makes sense why this behavior happens.", "created": "2016-10-21T10:31:00.000000"}], "components": [], "created": "2016-10-20T16:58:00.000000", "creator": "70121:ef2e2611-7893-499f-94ad-b7b2245beace", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@50ddeadb"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": 
[]}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hz9p4v:"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_4046043734_*|*_6_*:*_1_*:*_0"}], "description": "After taking a snapshot of the vm, vmpooler immediately responds back with a hash of the snapshot. However it seems to take ~5 or more minutes for the snapshots key to actually show up when you query the hostname from vmpooler.\n\nWorkflow:\n\n* Request a vm\n* Take a snapshot against that host\n* Query hostname from pooler\n\n{noformat}\nbrian@localhorse:~ % floaty get debian-7-x86_64 --verbose\nI, [2016-10-20T15:51:43.839714 #23459]  INFO -- : post https://vcloud.delivery.puppetlabs.net/api/v1/vm/debian-7-x86_64\nD, [2016-10-20T15:51:43.839808 #23459] DEBUG -- request: User-Agent: \"Faraday v0.9.2\"\nX-AUTH-TOKEN: \"<REMOVED>\"\nI, [2016-10-20T15:51:45.018965 #23459]  INFO -- Status: 200\nD, [2016-10-20T15:51:45.019040 #23459] DEBUG -- response: server: \"nginx\"\ndate: \"Thu, 20 Oct 2016 22:51:45 GMT\"\ncontent-type: \"application/json\"\ncontent-length: \"117\"\nconnection: \"close\"\nx-content-type-options: \"nosniff\"\n{\"debian-7-x86_64\":\"rtb3iv8taexhzxj.delivery.puppetlabs.net\"}\nbrian@localhorse:~ % floaty snapshot rtb3iv8taexhzxj.delivery.puppetlabs.net --verbose\nI, [2016-10-20T15:51:53.611083 #23497]  INFO -- : post https://vcloud.delivery.puppetlabs.net/api/v1/vm/rtb3iv8taexhzxj.delivery.puppetlabs.net/snapshot\nD, [2016-10-20T15:51:53.611157 #23497] DEBUG -- request: User-Agent: \"Faraday v0.9.2\"\nX-AUTH-TOKEN: \"<REMOVED>\"\nI, [2016-10-20T15:51:53.866657 #23497]  INFO -- Status: 202\nD, [2016-10-20T15:51:53.866747 #23497] DEBUG -- response: server: \"nginx\"\ndate: \"Thu, 20 Oct 2016 22:51:53 GMT\"\ncontent-type: \"application/json\"\ncontent-length: 
\"95\"\nconnection: \"close\"\nx-content-type-options: \"nosniff\"\n{\"ok\"=>true,\n \"rtb3iv8taexhzxj\"=>{\"snapshot\"=>\"k4tn3yahy6n6pmx3tzjtb3cy7g4fcz3f\"}}\nbrian@localhorse:~ % floaty query rtb3iv8taexhzxj.delivery.puppetlabs.net --verbose\nI, [2016-10-20T15:52:00.167050 #23502]  INFO -- : get https://vcloud.delivery.puppetlabs.net/api/v1/vm/rtb3iv8taexhzxj.delivery.puppetlabs.net\nD, [2016-10-20T15:52:00.167136 #23502] DEBUG -- request: User-Agent: \"Faraday v0.9.2\"\nI, [2016-10-20T15:52:00.453559 #23502]  INFO -- Status: 200\nD, [2016-10-20T15:52:00.453648 #23502] DEBUG -- response: server: \"nginx\"\ndate: \"Thu, 20 Oct 2016 22:52:00 GMT\"\ncontent-type: \"application/json\"\ncontent-length: \"210\"\nconnection: \"close\"\nx-content-type-options: \"nosniff\"\n{\"ok\"=>true,\n \"rtb3iv8taexhzxj\"=>\n  {\"template\"=>\"debian-7-x86_64\",\n   \"lifetime\"=>12,\n   \"running\"=>0.0,\n   \"state\"=>\"running\",\n   \"ip\"=>\"10.32.125.109\",\n   \"domain\"=>\"delivery.puppetlabs.net\"}}\n{noformat}\n\nAfter a while...it eventually shows up:\n\n{noformat}\nbrian@localhorse:~ % floaty query rtb3iv8taexhzxj.delivery.puppetlabs.net --verbose\nI, [2016-10-20T15:56:12.496455 #23519]  INFO -- : get https://vcloud.delivery.puppetlabs.net/api/v1/vm/rtb3iv8taexhzxj.delivery.puppetlabs.net\nD, [2016-10-20T15:56:12.496553 #23519] DEBUG -- request: User-Agent: \"Faraday v0.9.2\"\nI, [2016-10-20T15:56:14.153018 #23519]  INFO -- Status: 200\nD, [2016-10-20T15:56:14.153109 #23519] DEBUG -- response: server: \"nginx\"\ndate: \"Thu, 20 Oct 2016 22:56:14 GMT\"\ncontent-type: \"application/json\"\ncontent-length: \"278\"\nconnection: \"close\"\nx-content-type-options: \"nosniff\"\n{\"ok\"=>true,\n \"rtb3iv8taexhzxj\"=>\n  {\"template\"=>\"debian-7-x86_64\",\n   \"lifetime\"=>12,\n   \"running\"=>0.08,\n   \"state\"=>\"running\",\n   \"snapshots\"=>[\"k4tn3yahy6n6pmx3tzjtb3cy7g4fcz3f\"],\n   \"ip\"=>\"10.32.125.109\",\n   \"domain\"=>\"delivery.puppetlabs.net\"}}\n{noformat}", 
"epicLinkSummary": null, "estimate": "PT0S", "externalId": "10127", "fixedVersions": [], "id": "10127", "issueType": "Bug", "key": "POOLER-35", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "70121:ef2e2611-7893-499f-94ad-b7b2245beace", "resolution": "Won't Do", "resolutionDate": "2016-12-06T11:52:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "vmpooler doesn't add \"snapshots\" key to vm status right away", "timeSpent": "PT0S", "updated": "2016-12-06T11:52:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:5d2971d3-3eb2-4f05-ab63-8a307eabd9c4", "attachments": [], "comments": [{"author": "557058:7306c811-399c-4964-b8cc-74e0ece239a1", "body": "I'm breaking the relates to link, to IMAGES-364, so that the migration dashboard more accurately reflects our progress. 
While we do still need this ticket, it's not blocking migration.", "created": "2017-02-08T09:55:00.000000"}], "components": ["VM Pooler"], "created": "2016-10-17T18:09:00.000000", "creator": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@4e284e86"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hz9n4f:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "1.0"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "08/Feb/17"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_4301019470_*|*_10007_*:*_1_*:*_326265100_*|*_3_*:*_1_*:*_285226_*|*_10009_*:*_1_*:*_29775356_*|*_6_*:*_1_*:*_0_*|*_10005_*:*_1_*:*_49860981046"}], "description": "When testing a change for vmpooler-cinext [~accountid:63d40628f6e1b543161789a7] encountered an issue where centos-4-x86_64 pooled VMs do not leave the pending state indicating 
it was never able to make a socket connection on port 22 to the VM via its short name. When investigating the VMs they appear stuck in a boot loop. When investigating how this compares to production we can see that there are VMs in the pool, but they took between 20 minutes and three hours to reach the ready state from pending/booting, which is evident by looking at the VM object keys/values in redis. Since we collect this data it would be useful to graph it or store it somewhere in order to assist in troubleshooting clone issues and identify proactively when a VM issue exists that will lead to pool depletion.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10078", "fixedVersions": [], "id": "10078", "issueType": "Improvement", "key": "POOLER-34", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "resolution": "Fixed", "resolutionDate": "2018-07-10T18:07:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Report time a VM takes from clone to ready state", "timeSpent": "PT0S", "updated": "2018-07-10T18:07:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [{"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "It looks to me like this has been completed. 
Please re-open if I am incorrect.", "created": "2018-04-16T13:17:00.000000"}], "components": [], "created": "2016-10-14T04:54:00.000000", "creator": "557058:295d7a84-a09b-4348-8961-a1e1764c190e", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@49efb788"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hymsmn:"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "16/Apr/18"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_6594_*|*_6_*:*_1_*:*_0_*|*_10005_*:*_1_*:*_47463781482"}], "description": "This is simply a matter of cherry picking [PR 160|https://github.com/puppetlabs/vmpooler/pull/160] from {{ci-next}} to {{master}}", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10032", "fixedVersions": [], "id": "10032", "issueType": "Task", "key": "POOLER-55", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Major", "projectDescription": null, "projectKey": 
"POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:295d7a84-a09b-4348-8961-a1e1764c190e", "resolution": "Fixed", "resolutionDate": "2018-04-16T13:17:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "vmpooler: Allow per pool selection of TTL times", "timeSpent": "PT0S", "updated": "2018-04-16T13:17:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "attachments": [], "comments": [{"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "This change has been merged. VMs have their data marked for expiration when clone fails now.", "created": "2018-07-03T10:07:00.000000"}], "components": ["VM Pooler"], "created": "2016-09-14T06:19:00.000000", "creator": "557058:74b44c9c-9f7d-4d53-b6b6-e598c1652c9b", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@719ceeaa"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Rank", "fieldType": 
"com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hz8u3b:"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "03/Jul/18"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_38360550_*|*_10009_*:*_1_*:*_1092759607_*|*_6_*:*_1_*:*_0_*|*_10005_*:*_1_*:*_55647322753"}], "description": "When a VM is destroyed via the vmpooler, its associated vmpooler__vm__$name key has an expire TTL set and is expired after 7 days ( not sure where that number came from ).  If a vmpooler clone operations fails, no ttl is set and the key remains in redis until it's manually deleted or expired.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10159", "fixedVersions": [], "id": "10159", "issueType": "Bug", "key": "POOLER-31", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:74b44c9c-9f7d-4d53-b6b6-e598c1652c9b", "resolution": "Fixed", "resolutionDate": "2018-07-03T10:07:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "no ttl or deletion of redis key when vmpooler clone fails", "timeSpent": "PT0S", "updated": "2018-07-03T10:07:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [{"attacher": "557058:74ced041-721a-48ec-853a-35c3cf9ebfa9", "created": "2016-09-08T09:46:00.000000", "name": "build58_failure.txt", "uri": "https://puppet.atlassian.net/rest/api/3/attachment/content/10011"}], "comments": [{"author": "557058:cf9507a2-54bf-4dec-90af-b6ff0ddd3f65", "body": "From the SRE hipchat room yesterday.\n\n{code}\n[Sep-7 11:21 AM] Eric Williamson: 
Can anyone help me troubleshoot a very odd CI issue? On of our 2016.5.x mono smoke jobs requested a sles 11 vm - however the VM it got already had puppet installed from a 3.8.x job\n[Sep-7 11:21 AM] Eric Williamson: lfms4y7kp7hfarb.delivery.puppetlabs.net\n[Sep-7 11:21 AM] Eric Williamson: is the vm in question\n[Sep-7 11:21 AM] Eric Williamson: and just found the 3.8.x job it was used in\n[Sep-7 11:21 AM] Eric Williamson: https://jenkins-enterprise.delivery.puppetlabs.net/view/pe-integration/view/pe-3.8.x/job/enterprise_...\n[Sep-7 11:21 AM] JIRA: [image: https://tickets.puppetlabs.com/images/16jira.png]  PE-3 : ruby Reported by Kenn Hussey \n\t\tType:\u00a0 [image: https://tickets.puppetlabs.com/images/icons/issuetypes/subtask_alternate.png] Sub-task \u00a0 Priority:\u00a0  [image: https://tickets.puppetlabs.com/images/icons/priorities/major.png] Major \u00a0 Status:\u00a0  Closed \n\t\tAssignee: Branan Riley\n[Sep-7 11:25 AM] Eric Williamson: not a one off issue either, James Stock's reported a similar issue this morning, but with a centos VM\n[Sep-7 11:27 AM] Eric Williamson: and an ubuntu VM just failed with the same thing\n[Sep-7 11:27 AM] Eric Williamson: @here ^^\n[Sep-7 11:28 AM] Richard Raseley: There were some ad-hoc restarts of the vmpooler service this morning. 
It's plausible that this could've have impacted various requests to its API, resulting in some weird failure state.\n[Sep-7 11:29 AM] Richard Raseley: Did the tests that should have torn down these VMs run this morning?\n[Sep-7 11:29 AM] Eric Williamson: the tests failed and ran with beakers preserve=on-fail option\n[Sep-7 11:29 AM] Eric Williamson: which to my knowledge just leaves them up and running for the default lifetime\n[Sep-7 11:30 AM] Michael Stahnke: @HeathSeals you around?\n[Sep-7 11:31 AM] Heath Seals: @stahnma yup.\n[Sep-7 11:31 AM] Michael Stahnke: see fun with vmpooler above ^^\n[Sep-7 11:31 AM] Michael Stahnke: Not sure what's up, but seems janky\n[Sep-7 11:31 AM] Michael Stahnke: @rick you as well ^^\n[Sep-7 11:32 AM] Heath Seals: yeah.  i believe the one james reported was around the time john had issues with the restart.  but it's odd that it handed out a vm that had already been used.\n[Sep-7 11:33 AM] Eric Williamson: Actually correction to that, the 3.8.x job *did* had it back for destruction\n[Sep-7 11:33 AM] Rick Bradley: the vmpooler redis state handling is ironclad, folks\n[Sep-7 11:33 AM] Rick Bradley: nothing could ever go wrong\n[Sep-7 11:33 AM] Heath Seals: yeah, definitely possible something got lost or missed during the restarts and as the pooler passes stuff through the queues.\n[Sep-7 11:33 AM] Eric Williamson: when was the restart? 
looks like the sles-11 VM being handed back would of happened around 7:27am\n[Sep-7 11:33 AM] Rick Bradley: that's around the right time\n[Sep-7 11:34 AM] Heath Seals: yup.\n[Sep-7 11:34 AM] Eric Williamson: so wait roughly ~7 more hours and if it happens again, panic?\n[Sep-7 11:35 AM] Rick Bradley: I think there are a few threads:\n[Sep-7 11:35 AM] Rick Bradley: - something has been causing performance degradation (at least I'm hearing things that lead me to believe we're starting to look for that, but I haven't been following closely) around the vmpooler\n[Sep-7 11:36 AM] Rick Bradley: - we had a restart this morning, because a PR on puppetlabs-modules for the vmpooler config behaved weirdly\n[Sep-7 11:36 AM] Rick Bradley: - vmpooler's state handling for the various redis-based queues is sometimes iffy at best\n[Sep-7 11:36 AM] Rick Bradley: given the restart, I think the latter thing is the hazy \"explanation\" for handing out un-recycled vm(s)\n[Sep-7 11:37 AM] Rick Bradley: something should've been in one queue for reclamation, ended up after restart in another queue\n[Sep-7 11:37 AM] Rick Bradley: I don't know how the transition happens off the top of my head but I wouldn't be surprised that a timeout or error state would make vmpooler decide \"that vm must've come back up after refresh now\", when in fact that wasn't the case\n[Sep-7 11:38 AM] Rick Bradley: as for the restart, I have no real evidence, other than seeing the logs John saw when he applied his simple puppet change, but it looked like a lot of changes happening", "created": "2016-09-08T09:49:00.000000"}, {"author": "557058:42ee807e-7bdc-4d08-8c59-b269f42cee43", "body": "cc [~accountid:557058:74b44c9c-9f7d-4d53-b6b6-e598c1652c9b] [~accountid:63d40635a05386069cdb69d6] [~accountid:557058:295d7a84-a09b-4348-8961-a1e1764c190e] [~accountid:557058:b6301e26-c165-42d0-ab3a-59124339555e] [~accountid:557058:7306c811-399c-4964-b8cc-74e0ece239a1]", "created": "2016-09-08T09:51:00.000000"}, {"author": 
"557058:42ee807e-7bdc-4d08-8c59-b269f42cee43", "body": "As part of the earlier history of this, at 7:30am Pacific Time on Wednesday, September 7th, there was a deployment to the vmpooler node of a PR (https://github.com/puppetlabs/puppetlabs-modules/pull/6416) to switch some Windows-related VM images.\n\nThis ran into some sort of problem (I think Heath and John know more about this), and the vmpooler didn't restart successfully.  A re-deploy worked.\n\nIt seems likely that the state of various vmpooler keys in redis for vmpooler pools is out of sync with the state of managed vms.", "created": "2016-09-08T09:55:00.000000"}, {"author": "557058:74b44c9c-9f7d-4d53-b6b6-e598c1652c9b", "body": "Digging deeper into this particular vm ( eecwj564v125057 ),  it was cloned at {noformat}2016-09-07 06:14:41 -0700{noformat} and claims to have been checked out at {noformat}2016-09-07 22:16:12 -0700{noformat}, but PE 3.x bits hit the disk here {noformat}Sep 07 07:06:20 Installed: pe-puppet-enterprise-release-3.7.2.0-1.pe.el7.noarch{noformat}.  
That timing definitely corresponds with the vmpooler/redis issues from yesterday and like [~accountid:557058:42ee807e-7bdc-4d08-8c59-b269f42cee43] mentioned above, vmpooler keys from a brief period of time most likely don't match reality.", "created": "2016-09-08T10:10:00.000000"}, {"author": "557058:74ced041-721a-48ec-853a-35c3cf9ebfa9", "body": "Added {{f2436qnd9ef5wgo}} Scientific 5 32 last night, and have generalised this ticket to track any additional instances that we come across during integration runs.", "created": "2016-09-09T06:31:00.000000"}, {"author": "557058:74ced041-721a-48ec-853a-35c3cf9ebfa9", "body": "Added {{ivxyq6j2neh2jj3}} Oracle6 32", "created": "2016-09-09T09:11:00.000000"}, {"author": "557058:74b44c9c-9f7d-4d53-b6b6-e598c1652c9b", "body": "i've flushed the sci-5-32 pool ( one of the problem pools and it still had a number of vms that had been running a while ).\u2002\u2002oracle-7-64 was already full of recently provisioned vms and oracle-6-32 only had one older vm lingering ( it has been flushed ).  
it's possible we'll run into more unless we do a  1) mass flush of VMs that were running ( and still running )  or  2) identify/flush pools being used by jobs during the affected time frame.", "created": "2016-09-09T10:55:00.000000"}, {"author": "557058:74b44c9c-9f7d-4d53-b6b6-e598c1652c9b", "body": "[~accountid:557058:74ced041-721a-48ec-853a-35c3cf9ebfa9] have you run into any other instances of dirty vms being returned by the pooler?", "created": "2016-09-14T08:32:00.000000"}, {"author": "557058:74ced041-721a-48ec-853a-35c3cf9ebfa9", "body": "[~accountid:557058:74b44c9c-9f7d-4d53-b6b6-e598c1652c9b] not in the past few days", "created": "2016-09-14T08:37:00.000000"}, {"author": "557058:91233464-4152-4228-81dd-172d43a52a03", "body": "It looks like we're seeing some new instances of this, tracking in POOLER-64.", "created": "2017-01-17T16:21:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "Has this continued to be an issue?", "created": "2018-04-16T13:16:00.000000"}], "components": [], "created": "2016-09-08T09:46:00.000000", "creator": "557058:74ced041-721a-48ec-853a-35c3cf9ebfa9", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@68d222bd"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": 
"com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hz8qkn:"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "08/Sep/16"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_513936728_*|*_3_*:*_1_*:*_57923988395_*|*_6_*:*_1_*:*_0"}], "description": "h4. eecwj564v125057 - Oracle7\nDuring a run of the \"Project pe-acceptance-tests (2016.3.x) 30: Monolithic LEI Smoke Integration (nightly)\" job it failed due to what appears to have been a dirty oracle7 vmpooler instance being presented to beaker.\n\n- eecwj564v125057.delivery.puppetlabs.net\n\nFirst clue was a check of the {{~/.ssh/environment}} which usually does not result in a match.\n{code}\neecwj564v125057.delivery.puppetlabs.net (oracle7-64-2) 22:16:18$ grep ^PATH=.*PATH ~/.ssh/environment\n PATH=/opt/puppetlabs/bin:$PATH:PATH:/opt/puppet/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin\neecwj564v125057.delivery.puppetlabs.net (oracle7-64-2) executed in 0.01 seconds\n{code}\n\nAnd then a more obvious problem with the {{puppet-agent}} package already being installed on the system.\n\n{code}\n          Installing : puppet-agent-1.6.2.121.gd6a6452-1.el7.x86_64                1/22         \n        warning: /etc/puppetlabs/mcollective/server.cfg created as /etc/puppetlabs/mcollective/server.cfg.rpmnew\n        warning: /etc/puppetlabs/puppet/puppet.conf created as /etc/puppetlabs/puppet/puppet.conf.rpmnew\n          Erasing    : pe-agent-3.7.2-1.pe.el7.noarch                              2/22         \n          Erasing    : pe-puppet-3.7.4.0-1.pe.el7.noarch                           3/22         \n          Erasing    : pe-mcollective-2.6.0.7-1.pe.el7.noarch                      4/22\n...snip...\n{code}\n\nFull log from the failing [build 
#58|https://jenkins-enterprise.delivery.puppetlabs.net/job/enterprise_pe-acceptance-tests_integration-system_pe_lei-mono_nightly_2016.3.x/58/] attached\n\nh4. f2436qnd9ef5wgo - Scientific 5 32\n\nEncountered this in the following build.\n\n- [pe-acceptance-tests (2016.3.x) 21: Frictionless Agent Upgrade Integration (nightly)|https://jenkins-enterprise.delivery.puppetlabs.net/job/enterprise_pe-acceptance-tests_integration-system_pe_full-agent-upgrade-frictionless_nightly_2016.3.x/LAYOUT=centos6-64mcd-scientific5-32f-64f,LEGACY_AGENT_VERSION=NONE,PLATFORM=NOTUSED,SCM_BRANCH=2016.3.x,UPGRADE_FROM=2016.1.2,label=beaker-bigjob/54/]\n\nh4. ivxyq6j2neh2jj3 oracle6 32\n\nEncountered in the following build:\n\n- [pe-acceptance-tests (3.8.x) 21: Full Integration|https://jenkins-enterprise.delivery.puppetlabs.net/job/enterprise_pe-acceptance-tests_integration-system_pe_full_3.8.x/LAYOUT=64mcd-32a-64f,LEGACY_AGENT_VERSION=NONE,PLATFORM=oracle6,SCM_BRANCH=3.8.x,UPGRADE_FROM=NONE,label=beaker-bigjob/504/]", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10136", "fixedVersions": [], "id": "10136", "issueType": "Bug", "key": "POOLER-30", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Major", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:74ced041-721a-48ec-853a-35c3cf9ebfa9", "resolution": "Fixed", "resolutionDate": "2018-07-16T18:32:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Dirty vmpooler instances presented during integration nightly", "timeSpent": "PT0S", "updated": "2018-07-16T18:32:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [{"author": "557058:ab1874a9-45ab-4efc-91aa-5200c165b2c4", "body": "I like 
these ideas. Now to find some time to work on them :)", "created": "2016-09-07T17:35:00.000000"}], "components": ["VM Pooler"], "created": "2016-08-25T15:44:00.000000", "creator": "557058:5f998a8b-08b1-46f5-beb0-60cd1d60b36b", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@34ef1036"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hz8htj:"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "07/Sep/16"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_1129582140_*|*_5_*:*_1_*:*_0_*|*_10005_*:*_1_*:*_63050192677"}], "description": "It would be really nice if we didn't have to parse vmpooler's logs in Logstash since we actually control the log format. Perhaps we could log JSON to replace the current format. Another idea I had was that we could push structured logs onto a redis queue and have Logstash consume the events. 
\n\nPart of why I'd like structured logging is so I can correlate vmpooler events with CI jobs. This would be straightforward if vmpooler logged tags when VM's are pulled from a pool. Currently the vmpooler Beaker hypervisor will tag VM's with the project and jenkins build URL. It would allow us to link other metadata that vmpooler provides (like the hypervisor SUT mapping RE-7947).\n\nIdeally it would be nice if vmpooler could collect performance metrics on the VM's it's managing on a per transaction basis. For example, I want to know how many resources were used for a given CI job. Then we could answer questions like \"What is the average IOPS for a given job?\". It would allow us to quickly verify whether or not performance issues are related to capacity or scheduling. For now, I'm using Beaker's performance module to collect data which means that the same logic would need to exist for non-Beaker jobs.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10029", "fixedVersions": [], "id": "10029", "issueType": "Improvement", "key": "POOLER-32", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:5f998a8b-08b1-46f5-beb0-60cd1d60b36b", "resolution": "Won't Do", "resolutionDate": "2018-09-07T11:26:00.000000", "status": "Resolved", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Add structured logging to vmpooler", "timeSpent": "PT0S", "updated": "2018-09-07T11:26:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [{"author": "557058:b5baf531-c3a9-49d0-b512-74aaa6e4335d", "body": "I'm assuming this information is available to us in the Ruby API. 
+1 on the idea\n\ncc/ [~accountid:557058:74b44c9c-9f7d-4d53-b6b6-e598c1652c9b], [~accountid:557058:42ee807e-7bdc-4d08-8c59-b269f42cee43]", "created": "2016-08-22T13:39:00.000000"}, {"author": "557058:42ee807e-7bdc-4d08-8c59-b269f42cee43", "body": "cc [~accountid:557058:3a3de6c6-95b1-47b3-a52a-6a6865ff9241] and [~accountid:557058:9479147e-472e-492f-9d5f-2788ee2dd8d0], as we were talking about this being useful in always-be-scheduling down the road as well...", "created": "2016-08-22T13:52:00.000000"}, {"author": "557058:3a3de6c6-95b1-47b3-a52a-6a6865ff9241", "body": "Related: RE-7572", "created": "2016-08-22T14:09:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "Is it sufficient if VM hypervisor information is logged at VM checkout time in vmpooler.log, or would you like to see this information made available elsewhere?", "created": "2016-11-03T10:20:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "If we complete RE-7572 then the information will be available there. I think that's likely sufficient, in addition to the logged data. 
Speak up if you disagree.", "created": "2017-03-23T15:11:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "We log this data, which I think is reasonable.", "created": "2018-04-18T18:26:00.000000"}], "components": ["VM Pooler"], "created": "2016-08-22T12:34:00.000000", "creator": "557058:5f998a8b-08b1-46f5-beb0-60cd1d60b36b", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@37e45190"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hz8e0f:"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "22/Aug/16"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_3806682_*|*_6_*:*_1_*:*_0_*|*_10004_*:*_1_*:*_45907552005_*|*_10005_*:*_1_*:*_6295327790"}], "description": "As part of gaining more insight into troubleshooting performance issues the CI system it would be nice if we had some way of knowing which hypervisor is hosting a given VM. Perhaps this could be added to the vmpooler REST API. 
Another option would be to log this piece of information. ", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10126", "fixedVersions": [], "id": "10126", "issueType": "Improvement", "key": "POOLER-33", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:5f998a8b-08b1-46f5-beb0-60cd1d60b36b", "resolution": "Fixed", "resolutionDate": "2018-04-18T18:26:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Expose VM to hypervisor mapping through the API.", "timeSpent": "PT0S", "updated": "2018-04-18T18:26:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "623c1349bef8a60068c79b09", "attachments": [], "comments": [{"author": "557058:fae769e7-41fe-4341-8da6-88fef69e35e8", "body": "[~accountid:557058:bd39de7e-cb77-4354-a37b-2ff46c578466] sez:\n\ndocument that metric at https://confluence.puppetlabs.com/display/OPS/Existing+Graphite+Metrics", "created": "2016-08-16T11:16:00.000000"}, {"author": "557058:fae769e7-41fe-4341-8da6-88fef69e35e8", "body": "Per our conversation, I am assigning this to you prior to my taking leave.", "created": "2016-09-30T11:08:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "I think this is fixed by splitting vmpooler and vmpooler-cinext.", "created": "2018-07-16T18:26:00.000000"}], "components": ["VM Pooler"], "created": "2016-08-15T11:26:00.000000", "creator": "557058:fae769e7-41fe-4341-8da6-88fef69e35e8", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": 
"com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@8b48b6f"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "Epic Link", "fieldType": "com.pyxis.greenhopper.jira:gh-epic-link", "value": "POOLER-99"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hz88bj:"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "17/Jul/18"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_155372_*|*_6_*:*_1_*:*_0_*|*_10005_*:*_1_*:*_60505084114"}], "description": "We should add a metric to Graphite which will allow us to track the percentage of vmpooler VM requests which were made on an ad-hoc basis for non-CI usage.\n\nHaving this information will allow us to make more intelligent decisions in the long term.", "epicLinkSummary": "All optimization and improvements tickets targeted for 2018", "estimate": "PT0S", "externalId": "10133", "fixedVersions": [], "id": "10133", "issueType": "Improvement", "key": "POOLER-54", "labels": ["wishlist"], "originalEstimate": "PT0S", "parent": "10083", "parentSummary": "All optimization and improvements tickets targeted for 2018", "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", 
"projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:fae769e7-41fe-4341-8da6-88fef69e35e8", "resolution": "Fixed", "resolutionDate": "2018-07-16T18:26:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Track VMpooler Ad-Hoc Percent in Graphite", "timeSpent": "PT0S", "updated": "2018-07-16T18:26:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "attachments": [], "comments": [{"author": "557058:fae769e7-41fe-4341-8da6-88fef69e35e8", "body": "Don't have strong opinions on what this should be. 7 day? 14 days?\n\nIs there any downside (other than the computational cost) in doing this more frequently?", "created": "2016-08-05T11:40:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "This is covered by the global settings under config for vm_lifetime and vm_lifetime_auth. 
TTLs are configurable on a per-pool basis to set TTLs for the VMs waiting to be used.", "created": "2017-07-12T16:20:00.000000"}], "components": ["VM Pooler"], "created": "2016-08-05T11:39:00.000000", "creator": "557058:fae769e7-41fe-4341-8da6-88fef69e35e8", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@78c17b9e"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "Epic Link", "fieldType": "com.pyxis.greenhopper.jira:gh-epic-link", "value": "POOLER-51"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hz80sv:"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "12/Jul/17"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_18263025_*|*_6_*:*_1_*:*_0_*|*_10005_*:*_1_*:*_29460976201"}], "description": "Per my conversation with Rick Sherman, this is a request to add a TTL to all pools in vmpooler.\n\nThe reasoning behind this request is the desire to reduce the number of stale / long running instances in the environment, and increase turnover in 
less-used pools, thereby promoting better workload balance.", "epicLinkSummary": "All optimization and improvements tickets targeted for 2017", "estimate": "PT0S", "externalId": "10049", "fixedVersions": [], "id": "10049", "issueType": "Improvement", "key": "POOLER-27", "labels": [], "originalEstimate": "PT0S", "parent": "10119", "parentSummary": "All optimization and improvements tickets targeted for 2017", "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:fae769e7-41fe-4341-8da6-88fef69e35e8", "resolution": "Fixed", "resolutionDate": "2017-07-12T16:20:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Request to Configure TTL for All Pools", "timeSpent": "PT0S", "updated": "2017-07-12T16:20:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [{"author": "557058:b5baf531-c3a9-49d0-b512-74aaa6e4335d", "body": "[~accountid:557058:74b44c9c-9f7d-4d53-b6b6-e598c1652c9b] has seen this happen before also.", "created": "2016-07-22T15:45:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "I believe this has been fixed. 
Please reopen if that is not the case.", "created": "2018-04-18T18:29:00.000000"}], "components": [], "created": "2016-07-22T15:44:00.000000", "creator": "557058:b5baf531-c3a9-49d0-b512-74aaa6e4335d", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@3481e4d8"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hz7pnz:"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "19/Apr/18"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_7557_*|*_6_*:*_1_*:*_0_*|*_10006_*:*_1_*:*_54873876406"}], "description": "While working with packer templates it seems vmpooler is able to successful clone and bring systems online, but then destroys them a short time later without giving a reason.\n\nI was able to open and maintain a SSH connection to a system before it was reaped and looking through the vmpooler code there shouldn't be any instances where this would happen without logging.\n\nThe template experiencing this is 
packer/ubuntu-10.04-x86_64-0.0.2", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10158", "fixedVersions": [], "id": "10158", "issueType": "Bug", "key": "POOLER-20", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:b5baf531-c3a9-49d0-b512-74aaa6e4335d", "resolution": "Fixed", "resolutionDate": "2018-04-18T18:29:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "vmpooler destroys ready vm without logging reason", "timeSpent": "PT0S", "updated": "2018-04-18T18:29:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [{"attacher": "63d40628f6e1b543161789a7", "created": "2016-07-19T12:32:00.000000", "name": "Screen Shot 2016-07-19 at 10.50.37 AM.png", "uri": "https://puppet.atlassian.net/rest/api/3/attachment/content/10010"}], "comments": [{"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "This should be resolved with this change [https://github.com/puppetlabs/vmpooler/pull/256]\u00a0. 
It's possible that it's resolved already, but the change linked actively reduces pool sizes when in excess of the configured value, which did not happen prior.", "created": "2018-05-22T16:38:00.000000"}], "components": ["VM Pooler"], "created": "2016-07-19T12:33:00.000000", "creator": "557058:91233464-4152-4228-81dd-172d43a52a03", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@3ab78400"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hz7mg7:"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "22/May/18"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_628208253_*|*_10009_*:*_1_*:*_1109876803_*|*_6_*:*_1_*:*_0_*|*_10005_*:*_1_*:*_57447246779"}], "description": "While debugging an issue with {{vmpooler-cinext}} I noticed that sometimes a pool will have more ready VMs than the max ready limit. In reviewing our dashboards for production {{vmpooler}} it looks like it happens fairly\u00a0regularly. 
For example, the attached image shows the last 24 hours for the centos-6-x86_64 VM whose max limit is supposed to be 25, yet it hit 26 at various times.\n\nThis is occurring in production vmpooler which doesn't have the changes [~accountid:557058:42ee807e-7bdc-4d08-8c59-b269f42cee43] and I made in QENG-3919.\n\nI think this particular issue can occur as follows:\n\n# A single client requests 2 VMs, popping {{vmpooler\\_\\_ready\\_\\_<template>}} twice.\n# The background [{{check_pool}} thread|https://github.com/puppetlabs/vmpooler/blob/b59a1f888656d29a748bd231b0e420fe8727eaeb/lib/vmpooler/pool_manager.rb#L564-L605] for that template asynchronously computes the difference between the total pool size and (ready+pending). Assume it calls {{clone_vm}} once, but we then reach the max number of concurrent clone operations, so the second {{clone_vm}} call is skipped. The check_pool thread finishes its loop and sleeps.\n# The first {{clone_vm}} operation executes asynchronously. Hornet (vmpooler) is running with load average of ~ 15 and is using > 600% CPU (see below).\n# If the {{check_pool}} thread is scheduled again before the first {{clone_vm}} thread increments {{pending}} then the {{check_pool}} thread will think 2 VMs are still needed, not realizing the previous clone is still in-progress.\n\n{noformat}\ntop - 11:30:49 up 210 days, 16:22,  1 user,  load average: 15.66, 15.22, 14.91\nTasks: 129 total,   3 running, 126 sleeping,   0 stopped,   0 zombie\n%Cpu(s): 68.8 us,  8.0 sy,  5.3 ni, 15.9 id,  0.0 wa,  0.0 hi,  1.9 si,  0.0 st\nKiB Mem:   4062880 total,  3718368 used,   344512 free,    49016 buffers\nKiB Swap:   892924 total,   585168 used,   307756 free,   470028 cached\n\n  PID USER      PR  NI  VIRT  RES  SHR S  %CPU %MEM    TIME+  COMMAND\n19737 root      20   0 1607m 796m 9.8m S 634.7 20.1 118104:13 java\n{noformat}\n\nI'm not sure how to actually fix vmpooler, because it's not using redis transactions. 
So there is no guarantee the data it is reading is consistent as it's being read/modified by different threads.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10157", "fixedVersions": [], "id": "10157", "issueType": "Bug", "key": "POOLER-21", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:91233464-4152-4228-81dd-172d43a52a03", "resolution": "Fixed", "resolutionDate": "2018-06-04T12:55:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "vmpooler exceeds pool limits", "timeSpent": "PT0S", "updated": "2018-06-04T12:55:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:7306c811-399c-4964-b8cc-74e0ece239a1", "attachments": [], "comments": [{"author": "557058:fae769e7-41fe-4341-8da6-88fef69e35e8", "body": "Per our conversation, I am assigning to you before I take leave.", "created": "2016-09-30T11:17:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "I don't think this is of great concern compared to the number of connections that are being used to vsphere. New threads allow jruby to distribute processes across multiple cores. We will encounter more of a definitive limitation with vsphere connections, and stand to benefit more from sharing those across pools. 
We could consider a more tightly controlled threading model, but I suspect our efforts are better spent considering how best to use connection pools.", "created": "2016-10-25T17:13:00.000000"}], "components": ["VM Pooler"], "created": "2016-06-29T16:09:00.000000", "creator": "557058:fae769e7-41fe-4341-8da6-88fef69e35e8", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@5ffcff6c"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "Epic Link", "fieldType": "com.pyxis.greenhopper.jira:gh-epic-link", "value": "POOLER-51"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hz7adb:"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "25/Oct/16"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "null_*:*_1_*:*_0_*|*_1_*:*_1_*:*_3006342620_*|*_6_*:*_1_*:*_0_*|*_10005_*:*_2_*:*_20231014494"}], "description": "Currently vmpooler spawns and manages a thread for each of its pools. 
Due to some inefficiencies inherent in this, we should evaluate using thread pools.", "epicLinkSummary": "All optimization and improvements tickets targeted for 2017", "estimate": "PT0S", "externalId": "10156", "fixedVersions": [], "id": "10156", "issueType": "Improvement", "key": "POOLER-53", "labels": ["wishlist"], "originalEstimate": "PT0S", "parent": "10119", "parentSummary": "All optimization and improvements tickets targeted for 2017", "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:fae769e7-41fe-4341-8da6-88fef69e35e8", "resolution": "Won't Do", "resolutionDate": "2017-07-12T15:24:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Evaluate VMpooler Threading Efficiency", "timeSpent": "PT0S", "updated": "2017-07-12T15:24:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:7306c811-399c-4964-b8cc-74e0ece239a1", "attachments": [], "comments": [{"author": "557058:fae769e7-41fe-4341-8da6-88fef69e35e8", "body": "Per our conversation, I am assigning to you before I take leave.", "created": "2016-09-30T11:17:00.000000"}, {"author": "557058:6713b848-54a4-4f2e-9636-a24860d9c3f2", "body": "I've been doing some research and I have some ideas on how to use a vSphere Connection Pooler.\n\nFirstly we could create our own vSphere connection pool using the generic connection_pool gem  https://rubygems.org/gems/connection_pool/versions/2.2.0 .  This seems to be a fairly simple connection handler and should be straightforward to implement.  A lot of the code in the vSphere helper should not need to change.  
The only downside to this gem is it won't reduce the pool if it's not in use (unless vSphere kills the connection) also it does not expose any metrics so you don't know how full or empty the pool is.\n\nAlternatively we could create a connection pool using the concurrent_ruby gem (https://github.com/ruby-concurrency/concurrent-ruby) using read/write locks on a thread-safe hash table.  This means we can implement some metrics methods and optionally purge connections that have gone stale.\n\nI envisage that the connection pooler will be a separate class to the VSphere Helper, but is consumed by the helper.", "created": "2017-03-04T22:40:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "I ended up implementing something basic that uses task_limit to set a hard limit on the number of connections pool_manager can work with. This would bring our production setup down from ~80 connections to 10. I've got a PR up to vmpooler now.", "created": "2017-04-13T15:28:00.000000"}, {"author": "557058:6713b848-54a4-4f2e-9636-a24860d9c3f2", "body": "https://github.com/puppetlabs/puppetlabs-modules/pull/7162", "created": "2017-05-31T17:51:00.000000"}, {"author": "557058:6713b848-54a4-4f2e-9636-a24860d9c3f2", "body": "Resolved in https://github.com/puppetlabs/vmpooler/commit/1fcb19bd7bedda0d930af743e43ae1ae5b79dd6f", "created": "2017-05-31T17:51:00.000000"}], "components": ["VM Pooler"], "created": "2016-06-29T15:54:00.000000", "creator": "557058:fae769e7-41fe-4341-8da6-88fef69e35e8", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@27edba6e"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": 
"Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "Epic Link", "fieldType": "com.pyxis.greenhopper.jira:gh-epic-link", "value": "POOLER-51"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hz7acf:"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "05/Mar/17"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "null_*:*_1_*:*_0_*|*_1_*:*_1_*:*_3007211224_*|*_5_*:*_1_*:*_0_*|*_10005_*:*_2_*:*_16611112997"}], "description": "Currently vmpooler establishes and maintains a lot of connections. 
We want to investigate making more efficient usage of both connections and queries against VMware to reduce load on the management plane.", "epicLinkSummary": "All optimization and improvements tickets targeted for 2017", "estimate": "PT0S", "externalId": "10045", "fixedVersions": [], "id": "10045", "issueType": "Improvement", "key": "POOLER-52", "labels": ["wishlist"], "originalEstimate": "PT0S", "parent": "10119", "parentSummary": "All optimization and improvements tickets targeted for 2017", "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:fae769e7-41fe-4341-8da6-88fef69e35e8", "resolution": "Fixed", "resolutionDate": "2017-05-31T17:51:00.000000", "status": "Resolved", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Evaluate VMpooler Connection Pooling and Handling", "timeSpent": "PT0S", "updated": "2017-05-31T17:51:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "attachments": [], "comments": [{"author": "557058:3bbf1c83-df0e-4372-887d-cfc38dee9330", "body": "[~accountid:557058:b5baf531-c3a9-49d0-b512-74aaa6e4335d], when you have a chance, could you please take a peek at this? ", "created": "2016-06-27T17:05:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "I think the easiest way to support this is to send the data to graphite when a VM is deployed, and send an update when a VM is migrated. When next updating vmpooler I should be able to address this. It may not be until next quarter.", "created": "2017-03-23T15:11:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "We report a VM's parent host in vmpooler log and collect host utilization data. 
Most specifically CPU ready time relates directly to performance, so in instances of transients involving a particular VM we can identify its parent host and how it was performing at a given point in time. I believe this means this has been resolved. Please chime in if you disagree.", "created": "2017-11-15T16:32:00.000000"}], "components": [], "created": "2016-06-23T15:10:00.000000", "creator": "557058:3a3de6c6-95b1-47b3-a52a-6a6865ff9241", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@5d6aa494"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hymxaf:"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "27/Jun/16"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_352530985_*|*_6_*:*_1_*:*_0_*|*_10005_*:*_1_*:*_43720041104"}], "description": "Goals:\n* improve our understanding of vmpooler hypervisor utilization characteristics \n* more readily associate specific instances of transient CI failures with 
periods of high resource usage in vESO (this in turn will allow us to establish alert levels for vESO resource usage)\n\nWhen I talk about \"resources\" here I am referring to hypervisor resources such as network, memory, and CPU.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10155", "fixedVersions": [], "id": "10155", "issueType": "Improvement", "key": "POOLER-29", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:3a3de6c6-95b1-47b3-a52a-6a6865ff9241", "resolution": "Fixed", "resolutionDate": "2017-11-15T16:32:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Add hypervisor data to vmpooler logs and graphite stats collection", "timeSpent": "PT0S", "updated": "2017-11-15T16:32:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [{"author": "557058:b5baf531-c3a9-49d0-b512-74aaa6e4335d", "body": "This may have actually been fixed already by the ci.next merge...", "created": "2016-08-09T09:21:00.000000"}], "components": [], "created": "2016-05-10T15:40:00.000000", "creator": "557058:b5baf531-c3a9-49d0-b512-74aaa6e4335d", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@65edba16"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": 
"com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hz68k7:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "2.0"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_93367_*|*_5_*:*_1_*:*_0_*|*_10005_*:*_1_*:*_73422529026"}], "description": "If there are no aliases in the configuration file the alias_deref function throws an exception during failed API calls.\n\n{{2016-05-10 16:14:06 - NoMethodError - undefined method `[]' for nil:NilClass:\n\t/home/rsherman/workspace/vmpooler/lib/vmpooler/api/v1.rb:48:in `block in alias_deref'\n\torg/jruby/RubyHash.java:1339:in `each'\n\t/home/rsherman/workspace/vmpooler/lib/vmpooler/api/v1.rb:44:in `alias_deref'\n\t/home/rsherman/workspace/vmpooler/lib/vmpooler/api/v1.rb:393:in `block in POST /api/v1/vm/:template/?'\n\torg/jruby/RubyMethod.java:111:in `call'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/sinatra-1.4.7/lib/sinatra/base.rb:1611:in `block in compile!'\n\torg/jruby/RubyProc.java:318:in `call'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/sinatra-1.4.7/lib/sinatra/base.rb:975:in `block in route!'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/sinatra-1.4.7/lib/sinatra/base.rb:994:in `route_eval'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/sinatra-1.4.7/lib/sinatra/base.rb:975:in `block in 
route!'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/sinatra-1.4.7/lib/sinatra/base.rb:1015:in `block in process_route'\n\torg/jruby/RubyKernel.java:1096:in `catch'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/sinatra-1.4.7/lib/sinatra/base.rb:1013:in `process_route'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/sinatra-1.4.7/lib/sinatra/base.rb:973:in `block in route!'\n\torg/jruby/RubyArray.java:1560:in `each'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/sinatra-1.4.7/lib/sinatra/base.rb:972:in `route!'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/sinatra-1.4.7/lib/sinatra/base.rb:1085:in `block in dispatch!'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/sinatra-1.4.7/lib/sinatra/base.rb:1067:in `block in invoke'\n\torg/jruby/RubyKernel.java:1096:in `catch'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/sinatra-1.4.7/lib/sinatra/base.rb:1067:in `invoke'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/sinatra-1.4.7/lib/sinatra/base.rb:1082:in `dispatch!'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/sinatra-1.4.7/lib/sinatra/base.rb:907:in `block in call!'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/sinatra-1.4.7/lib/sinatra/base.rb:1067:in `block in invoke'\n\torg/jruby/RubyKernel.java:1096:in `catch'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/sinatra-1.4.7/lib/sinatra/base.rb:1067:in `invoke'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/sinatra-1.4.7/lib/sinatra/base.rb:907:in `call!'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/sinatra-1.4.7/lib/sinatra/base.rb:895:in 
`call'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/xss_header.rb:18:in `call'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/path_traversal.rb:16:in `call'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/json_csrf.rb:18:in `call'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/base.rb:49:in `call'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/base.rb:49:in `call'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/frame_options.rb:31:in `call'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/rack-1.6.4/lib/rack/nulllogger.rb:9:in `call'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/rack-1.6.4/lib/rack/head.rb:13:in `call'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/sinatra-1.4.7/lib/sinatra/show_exceptions.rb:25:in `call'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/sinatra-1.4.7/lib/sinatra/base.rb:182:in `call'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/sinatra-1.4.7/lib/sinatra/base.rb:2013:in `call'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/sinatra-1.4.7/lib/sinatra/base.rb:954:in `forward'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/sinatra-1.4.7/lib/sinatra/base.rb:1028:in `route_missing'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/sinatra-1.4.7/lib/sinatra/base.rb:989:in `route!'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/sinatra-1.4.7/lib/sinatra/base.rb:985:in 
`route!'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/sinatra-1.4.7/lib/sinatra/base.rb:1085:in `block in dispatch!'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/sinatra-1.4.7/lib/sinatra/base.rb:1067:in `block in invoke'\n\torg/jruby/RubyKernel.java:1096:in `catch'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/sinatra-1.4.7/lib/sinatra/base.rb:1067:in `invoke'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/sinatra-1.4.7/lib/sinatra/base.rb:1082:in `dispatch!'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/sinatra-1.4.7/lib/sinatra/base.rb:907:in `block in call!'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/sinatra-1.4.7/lib/sinatra/base.rb:1067:in `block in invoke'\n\torg/jruby/RubyKernel.java:1096:in `catch'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/sinatra-1.4.7/lib/sinatra/base.rb:1067:in `invoke'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/sinatra-1.4.7/lib/sinatra/base.rb:907:in `call!'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/sinatra-1.4.7/lib/sinatra/base.rb:895:in `call'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/xss_header.rb:18:in `call'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/path_traversal.rb:16:in `call'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/json_csrf.rb:18:in `call'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/base.rb:49:in `call'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/base.rb:49:in 
`call'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/frame_options.rb:31:in `call'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/rack-1.6.4/lib/rack/nulllogger.rb:9:in `call'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/rack-1.6.4/lib/rack/head.rb:13:in `call'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/sinatra-1.4.7/lib/sinatra/show_exceptions.rb:25:in `call'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/sinatra-1.4.7/lib/sinatra/base.rb:182:in `call'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/sinatra-1.4.7/lib/sinatra/base.rb:2013:in `call'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/sinatra-1.4.7/lib/sinatra/base.rb:954:in `forward'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/sinatra-1.4.7/lib/sinatra/base.rb:1028:in `route_missing'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/sinatra-1.4.7/lib/sinatra/base.rb:989:in `route!'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/sinatra-1.4.7/lib/sinatra/base.rb:985:in `route!'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/sinatra-1.4.7/lib/sinatra/base.rb:1085:in `block in dispatch!'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/sinatra-1.4.7/lib/sinatra/base.rb:1067:in `block in invoke'\n\torg/jruby/RubyKernel.java:1096:in `catch'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/sinatra-1.4.7/lib/sinatra/base.rb:1067:in `invoke'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/sinatra-1.4.7/lib/sinatra/base.rb:1082:in `dispatch!'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/sinatra-1.4.7/lib/sinatra/base.rb:907:in `block in 
call!'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/sinatra-1.4.7/lib/sinatra/base.rb:1067:in `block in invoke'\n\torg/jruby/RubyKernel.java:1096:in `catch'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/sinatra-1.4.7/lib/sinatra/base.rb:1067:in `invoke'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/sinatra-1.4.7/lib/sinatra/base.rb:907:in `call!'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/sinatra-1.4.7/lib/sinatra/base.rb:895:in `call'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/xss_header.rb:18:in `call'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/path_traversal.rb:16:in `call'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/json_csrf.rb:18:in `call'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/base.rb:49:in `call'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/base.rb:49:in `call'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/frame_options.rb:31:in `call'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/rack-1.6.4/lib/rack/nulllogger.rb:9:in `call'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/rack-1.6.4/lib/rack/head.rb:13:in `call'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/sinatra-1.4.7/lib/sinatra/show_exceptions.rb:25:in `call'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/sinatra-1.4.7/lib/sinatra/base.rb:182:in 
`call'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/sinatra-1.4.7/lib/sinatra/base.rb:2013:in `call'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/sinatra-1.4.7/lib/sinatra/base.rb:954:in `forward'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/sinatra-1.4.7/lib/sinatra/base.rb:1028:in `route_missing'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/sinatra-1.4.7/lib/sinatra/base.rb:989:in `route!'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/sinatra-1.4.7/lib/sinatra/base.rb:985:in `route!'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/sinatra-1.4.7/lib/sinatra/base.rb:1085:in `block in dispatch!'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/sinatra-1.4.7/lib/sinatra/base.rb:1067:in `block in invoke'\n\torg/jruby/RubyKernel.java:1096:in `catch'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/sinatra-1.4.7/lib/sinatra/base.rb:1067:in `invoke'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/sinatra-1.4.7/lib/sinatra/base.rb:1082:in `dispatch!'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/sinatra-1.4.7/lib/sinatra/base.rb:907:in `block in call!'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/sinatra-1.4.7/lib/sinatra/base.rb:1067:in `block in invoke'\n\torg/jruby/RubyKernel.java:1096:in `catch'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/sinatra-1.4.7/lib/sinatra/base.rb:1067:in `invoke'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/sinatra-1.4.7/lib/sinatra/base.rb:907:in `call!'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/sinatra-1.4.7/lib/sinatra/base.rb:895:in 
`call'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/xss_header.rb:18:in `call'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/path_traversal.rb:16:in `call'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/json_csrf.rb:18:in `call'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/base.rb:49:in `call'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/base.rb:49:in `call'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/frame_options.rb:31:in `call'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/rack-1.6.4/lib/rack/nulllogger.rb:9:in `call'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/rack-1.6.4/lib/rack/head.rb:13:in `call'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/sinatra-1.4.7/lib/sinatra/show_exceptions.rb:25:in `call'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/sinatra-1.4.7/lib/sinatra/base.rb:182:in `call'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/sinatra-1.4.7/lib/sinatra/base.rb:2013:in `call'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/xss_header.rb:18:in `call'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/path_traversal.rb:16:in `call'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/json_csrf.rb:18:in 
`call'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/base.rb:49:in `call'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/base.rb:49:in `call'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/rack-protection-1.5.3/lib/rack/protection/frame_options.rb:31:in `call'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/rack-1.6.4/lib/rack/nulllogger.rb:9:in `call'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/rack-1.6.4/lib/rack/head.rb:13:in `call'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/sinatra-1.4.7/lib/sinatra/base.rb:182:in `call'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/sinatra-1.4.7/lib/sinatra/base.rb:2013:in `call'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/sinatra-1.4.7/lib/sinatra/base.rb:1487:in `block in call'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/sinatra-1.4.7/lib/sinatra/base.rb:1787:in `synchronize'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/sinatra-1.4.7/lib/sinatra/base.rb:1487:in `call'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/gems/shared/gems/rack-1.6.4/lib/rack/handler/webrick.rb:88:in `service'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/stdlib/webrick/httpserver.rb:138:in `service'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/stdlib/webrick/httpserver.rb:94:in `run'\n\t/home/rsherman/.rbenv/versions/jruby-9.0.5.0/lib/ruby/stdlib/webrick/server.rb:294:in `block in start_thread'\n127.0.0.1 - - [10/May/2016:16:14:06 CDT] \"POST /api/v1/vm/debian-7-x86_641 HTTP/1.1\" 500 330420\n- -> /api/v1/vm/debian-7-x86_641\n}}", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10042", "fixedVersions": [], "id": "10042", "issueType": "Bug", 
"key": "POOLER-22", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Minor", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:b5baf531-c3a9-49d0-b512-74aaa6e4335d", "resolution": "Fixed", "resolutionDate": "2018-09-07T10:50:00.000000", "status": "Resolved", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "vmpooler - no aliases causes exception in API", "timeSpent": "PT0S", "updated": "2018-09-07T10:50:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:b5baf531-c3a9-49d0-b512-74aaa6e4335d", "attachments": [{"attacher": "557058:3a3de6c6-95b1-47b3-a52a-6a6865ff9241", "created": "2016-04-22T11:41:00.000000", "name": "vmpooler.vmpooler.log.gz", "uri": "https://puppet.atlassian.net/rest/api/3/attachment/content/10009"}], "comments": [{"author": "557058:42ee807e-7bdc-4d08-8c59-b269f42cee43", "body": "Given that we use vmpooler in CI.now, we should probably understand this regardless of direction forward :-)\n\nWe'll be using the vmpooler in some form or another for CI.next (if for no other reason than changing two major variables seems like a big risk). I understand that down the road eventually capacity on openstack and/or EC2 (elastic) are in the plans, but I'm not personally worrying about that.  When we get CI.next humming along well enough we can move capacity to those platforms.", "created": "2016-04-25T05:22:00.000000"}, {"author": "63d40635a05386069cdb69d6", "body": "[~accountid:557058:b5baf531-c3a9-49d0-b512-74aaa6e4335d] would you like to take a look at this? 
", "created": "2016-04-27T10:17:00.000000"}, {"author": "557058:b5baf531-c3a9-49d0-b512-74aaa6e4335d", "body": "Sure - it's a known issue that we should handle gracefully.", "created": "2016-04-27T10:18:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "Vmpooler will recover if this recurs.", "created": "2018-04-18T18:28:00.000000"}], "components": [], "created": "2016-04-22T11:45:00.000000", "creator": "557058:3a3de6c6-95b1-47b3-a52a-6a6865ff9241", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@5a918bf8"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hynmaf:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "3.0"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "25/Apr/16"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_426769087_*|*_6_*:*_1_*:*_0_*|*_10006_*:*_1_*:*_62323815560"}], "description": "During the 
OPS-8804 incident, a number of vmpooler templates became briefly unavailable. Once the template were confirmed as being online in the vsphere interface vmpooler still did not replenish its pools. I suspect this may have been due to a silently-crashed thread template manager thread (I wasn't able to find anything in the vmpooler log). I suspect this because the pools began filling again once vmpooler was restarted.\n\nThis behavior needs to be better understood and improved as we consider whether or not to use vmpooler in CI.Next (QENG-3779 [~accountid:557058:42ee807e-7bdc-4d08-8c59-b269f42cee43] [~accountid:63d40628f6e1b543161789a7] [~accountid:557058:74b44c9c-9f7d-4d53-b6b6-e598c1652c9b] [~accountid:557058:63eb8bfe-2856-4cbd-ab65-a0e2d681f15c])", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10026", "fixedVersions": [], "id": "10026", "issueType": "Bug", "key": "POOLER-23", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:3a3de6c6-95b1-47b3-a52a-6a6865ff9241", "resolution": "Fixed", "resolutionDate": "2018-04-18T18:28:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Vmpooler not resilient to missing templates", "timeSpent": "PT0S", "updated": "2018-04-18T18:28:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [{"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "I think things should be a little better now. It's still not terribly fast so if your connection is slow it could still be slow. 
Please reopen if still an issue or comment.", "created": "2018-07-16T18:21:00.000000"}], "components": [], "created": "2016-03-02T10:24:00.000000", "creator": "557058:b5baf531-c3a9-49d0-b512-74aaa6e4335d", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@7bce7948"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hz4u7b:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "3.0"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "17/Jul/18"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_4646_*|*_6_*:*_1_*:*_0_*|*_10006_*:*_1_*:*_74847442658"}], "description": "The vmpooler dashboard loads very slow (remotely).\n\nRCA is the api/v1/summary call takes ~60seconds to load and contains ~4.5MB of data.\n\nFix that.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10076", "fixedVersions": [], "id": "10076", "issueType": "Improvement", "key": 
"POOLER-28", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:b5baf531-c3a9-49d0-b512-74aaa6e4335d", "resolution": "Cannot Reproduce", "resolutionDate": "2018-07-16T18:21:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "vmpooler dashboard slow to load", "timeSpent": "PT0S", "updated": "2018-07-16T18:21:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "623c1349bef8a60068c79b09", "attachments": [], "comments": [{"author": "557058:3bbf1c83-df0e-4372-887d-cfc38dee9330", "body": "[~accountid:557058:468002b0-7677-4624-b195-4b7b813f8dc1] and [~accountid:557058:b64e0a8c-6f57-44f9-adf1-b0b9767a03da], does this description look right to you?", "created": "2016-02-24T12:25:00.000000"}, {"author": "557058:468002b0-7677-4624-b195-4b7b813f8dc1", "body": "!https://assets-cdn.github.com/images/icons/emoji/unicode/1f44d.png|width=24px,height=24px!", "created": "2016-02-24T12:55:00.000000"}, {"author": "557058:7306c811-399c-4964-b8cc-74e0ece239a1", "body": "We might need some help implementing this, but we will take a look and report back.", "created": "2016-02-24T13:51:00.000000"}, {"author": "557058:7306c811-399c-4964-b8cc-74e0ece239a1", "body": "In discussing with the team, a static test fixture might be the best approach.", "created": "2016-03-02T11:12:00.000000"}, {"author": "557058:3bbf1c83-df0e-4372-887d-cfc38dee9330", "body": "Ah, that makes sense. 
IIUC, I believe the test scenarios require infrastructure to support a minimum of two Oracle clusters: \n* Oracle RAC (a pair of SUTs sharing a set of disks)\n* Oracle Dataguard (a pair of Oracle RAC clusters)\n\n[~accountid:557058:468002b0-7677-4624-b195-4b7b813f8dc1] is probably the best source to confirm or provide more detailed requirements.\n\n", "created": "2016-03-02T11:46:00.000000"}, {"author": "557058:468002b0-7677-4624-b195-4b7b813f8dc1", "body": "Both of those scenarios are desirable by me for ease of debugging and development, however only the second one (a pair of RAC clusters) is implemented by our customer in production.", "created": "2016-03-02T12:39:00.000000"}, {"author": "63d40635a05386069cdb69d6", "body": "[~accountid:557058:74b44c9c-9f7d-4d53-b6b6-e598c1652c9b] here's another fun one on the pooler. ", "created": "2016-05-03T09:33:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "Closing due to inactivity.\n\nIf this is still an issue please comment or reopen the ticket. 
The original deadline has long since passed.", "created": "2018-07-16T18:20:00.000000"}], "components": [], "created": "2016-02-24T12:24:00.000000", "creator": "557058:3bbf1c83-df0e-4372-887d-cfc38dee9330", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@18f94f33"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hz4pl3:"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "24/Feb/16"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_2_*:*_3942059_*|*_6_*:*_1_*:*_0_*|*_10004_*:*_1_*:*_1267514_*|*_10005_*:*_1_*:*_75439789942"}], "description": "* Modules team is working on Oracle database module that will require testing of Oracle RAC.\n* Likely test cases may require that pairs of vmpooler SUTs share disks (that are managed on the SUTs by Oracle ASM). \n* This requirement sounds similar to QENG-3359, but the additional disks would need to added to multiple vmpooler nodes. \n* There may be additional requirements. 
https://www.vmware.com/files/pdf/solutions/oracle/Oracle_Databases_VMware_RAC_Deployment_Guide.pdf (or a more updated version) might be a good reference.\n* Need by date: April 2016. \n", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10095", "fixedVersions": [], "id": "10095", "issueType": "Improvement", "key": "POOLER-18", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:3bbf1c83-df0e-4372-887d-cfc38dee9330", "resolution": "Won't Do", "resolutionDate": "2018-07-16T18:20:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "VMpooler support for addition of the same disk to multiple VMs (supports OracleDB testing)", "timeSpent": "PT0S", "updated": "2018-07-16T18:21:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "623c1349bef8a60068c79b09", "attachments": [], "comments": [{"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "This happened.", "created": "2018-04-18T18:06:00.000000"}], "components": ["VM Pooler"], "created": "2016-02-19T02:01:00.000000", "creator": "557058:74b44c9c-9f7d-4d53-b6b6-e598c1652c9b", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@37ed073a"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, 
{"fieldName": "Epic Link", "fieldType": "com.pyxis.greenhopper.jira:gh-epic-link", "value": "POOLER-99"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hyo5rz:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "2.0"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "19/Apr/18"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_312337365_*|*_6_*:*_1_*:*_0_*|*_10005_*:*_1_*:*_67911570972"}], "description": "We should add a logrotate job ( weekly? ) for vmpooler hosts.  
The current log file on the production host has grown rather large ( 800+MB ).", "epicLinkSummary": "All optimization and improvements tickets targeted for 2018", "estimate": "PT0S", "externalId": "10116", "fixedVersions": [], "id": "10116", "issueType": "Improvement", "key": "POOLER-25", "labels": [], "originalEstimate": "PT0S", "parent": "10083", "parentSummary": "All optimization and improvements tickets targeted for 2018", "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:74b44c9c-9f7d-4d53-b6b6-e598c1652c9b", "resolution": "Fixed", "resolutionDate": "2018-04-18T18:06:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "create logrotate job for vmpooler hosts", "timeSpent": "PT0S", "updated": "2018-04-18T18:06:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [{"author": "557058:b5baf531-c3a9-49d0-b512-74aaa6e4335d", "body": "Also affects running.", "created": "2016-07-25T16:27:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "This has been fixed for pending VMs. 
It needs to be fixed for running VMs as well.", "created": "2016-11-22T15:57:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "I validated that this is fixed for pending and running VMs on master.", "created": "2018-05-31T14:11:00.000000"}], "components": [], "created": "2016-01-29T12:56:00.000000", "creator": "557058:b5baf531-c3a9-49d0-b512-74aaa6e4335d", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@384c3c0"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hymjfz:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "2.0"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "22/Nov/16"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_266442965_*|*_10007_*:*_1_*:*_451836491_*|*_6_*:*_1_*:*_0_*|*_10006_*:*_2_*:*_72981809806"}], "description": "When VM's are put into the pending state (from false positive clones) the 
current logic in vmpooler prevents them from being properly reaped.\n\nThey are present in __pending__ but never checked, as such prevents new systems from being cloned.\n\nThis is also true for VM's that are in running, but no longer in ESXi.\n\n[https://github.com/puppetlabs/vmpooler/blob/master/lib/vmpooler/pool_manager.rb#L520-L528]\n{code:ruby}\n      # PENDING\n      $redis.smembers('vmpooler__pending__' + pool['name']).each do |vm|\n        if inventory[vm]\n          begin\n            check_pending_vm(vm, pool['name'], pool['timeout'] || $config[:config]['timeout'] || 15)\n          rescue\n          end\n        end\n      end\n{code}", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10189", "fixedVersions": [], "id": "10189", "issueType": "Bug", "key": "POOLER-26", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:b5baf531-c3a9-49d0-b512-74aaa6e4335d", "resolution": "Fixed", "resolutionDate": "2018-05-31T14:11:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Pending VM's in vmpooler not cleared if missing from ESXi", "timeSpent": "PT0S", "updated": "2018-05-31T14:11:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [{"author": "557058:295d7a84-a09b-4348-8961-a1e1764c190e", "body": "[~accountid:557058:74b44c9c-9f7d-4d53-b6b6-e598c1652c9b] Suspect I was the cause of this using the following redis-cli command:\n\nrename vmpooler__active__win-10-pro-x86_64  vmpooler__completed__win-10-pro-x86_64\n\nredis 127.0.0.1:6379> smembers vmpooler__completed__win-10-pro-x86_64\n(error) ERR Operation against a key holding the wrong kind of value\n\nOnce I 
renamed the entries back to active, the problem cleared.\n\n\\** UPDATE ** talked to Heath - the problem happened at 20:15PST, so it probably wasn't me wot did it.\n", "created": "2016-01-29T04:09:00.000000"}, {"author": "557058:74b44c9c-9f7d-4d53-b6b6-e598c1652c9b", "body": "observed this behavior again this evening after a reboot of the vcenter server.  pretty certain the root cause is the vmpooler not re-establishing connectivity with the vcenter server after network blips ( or scheduled maintenance ).", "created": "2016-02-25T22:58:00.000000"}, {"author": "557058:74b44c9c-9f7d-4d53-b6b6-e598c1652c9b", "body": "cloning operations hung again on 5/20 at 14:47 after a vCenter restart.  it's seems only cloning is affected, pooler vms continue to be shutdown and destroyed.", "created": "2016-05-22T13:11:00.000000"}, {"author": "557058:9479147e-472e-492f-9d5f-2788ee2dd8d0", "body": "Vmpooler now runs with a connection pool and will retry on API failures or connection issues, so it should not stop cloning operations. Currently we are hitting more issues on the vcenter side of things, both with version 5.5 and 6.5 that impacts refilling pools.", "created": "2017-10-04T11:08:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "For what it's worth the last instance I saw clone operations slow down was due to slow API responses on vmware-vc2 preventing its pools from filling. The task_limit concept is global across all providers so if vc2 has 10 slots occupied for cloning vc1 provider cannot perform any clone operations until those slots are cleared. The short term solution is to increase the task_limit, but long term it is probably worth moving these concepts to be coupled with their provider.\n\nWhen this ticket was created vmpooler would stop clone operations and never recover without a service restart. 
I believe that problem has been fixed, I haven't seen it recur in the same way as before for many months.", "created": "2017-10-04T12:16:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "I have not seen these issues recur in some time.", "created": "2018-04-18T16:40:00.000000"}], "components": ["VM Pooler"], "created": "2016-01-29T01:58:00.000000", "creator": "557058:74b44c9c-9f7d-4d53-b6b6-e598c1652c9b", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@40deb9f0"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "Epic Link", "fieldType": "com.pyxis.greenhopper.jira:gh-epic-link", "value": "POOLER-99"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hyo5tz:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "2.0"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "29/Jan/16"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": 
"1_*:*_1_*:*_413263468_*|*_6_*:*_1_*:*_0_*|*_10006_*:*_1_*:*_69620051862"}], "description": "It appears that the vmpooler ceased cloning operating around 20:15PST on 01/28.  This has resulted in a number of empty pools.  I've bounced the services and pools are slowly starting to refill.  This isn't the first time we've seen this type of behavior.  We should see if a root cause can be identified.", "epicLinkSummary": "All optimization and improvements tickets targeted for 2018", "estimate": "PT0S", "externalId": "10003", "fixedVersions": [], "id": "10003", "issueType": "Bug", "key": "POOLER-24", "labels": [], "originalEstimate": "PT0S", "parent": "10083", "parentSummary": "All optimization and improvements tickets targeted for 2018", "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:74b44c9c-9f7d-4d53-b6b6-e598c1652c9b", "resolution": "Fixed", "resolutionDate": "2018-04-18T16:40:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "vmpooler occasionally stops cloning operations", "timeSpent": "PT0S", "updated": "2018-04-18T16:40:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "623c1349bef8a60068c79b09", "attachments": [], "comments": [{"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "Permissions of this file are now 0640.", "created": "2018-04-18T16:11:00.000000"}], "components": [], "created": "2016-01-21T16:52:00.000000", "creator": "557058:750ff3bd-7564-4d8a-b480-b500b85be583", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": 
"com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@70f9539f"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hz425z:"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "18/Apr/18"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_10710529807_*|*_6_*:*_1_*:*_0_*|*_10005_*:*_1_*:*_59958624044"}], "description": "The vSphere credentials that vmpooler uses are in {{/var/lib/vmpooler/vmpooler.yaml}}. 
That has {{0644}} permissions.\n\n", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10154", "fixedVersions": [], "id": "10154", "issueType": "Bug", "key": "POOLER-2", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:750ff3bd-7564-4d8a-b480-b500b85be583", "resolution": "Fixed", "resolutionDate": "2018-04-18T16:11:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Credentials stored insecurely in vmpooler configuration", "timeSpent": "PT0S", "updated": "2018-04-18T16:11:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [{"author": "557058:750ff3bd-7564-4d8a-b480-b500b85be583", "body": "I'm not sure which project this should go into.\n\nI'm happy to help with LDAP stuff.", "created": "2016-01-21T16:46:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "We can pretty easily use the successful bind to search for group membership and validate it, or not, before returning that auth was successful. This should prevent the need for a second bind attempt with the new DN. 
I've got a POC going in a branch where I'm toying with RBAC, though it hasn't been a priority yet.", "created": "2018-07-17T17:43:00.000000"}, {"author": "6220db96c4d0fe0069535219", "body": "Migrating issue to a GitHub project at https://github.com/orgs/puppetlabs/projects/64/views/1?pane=issue&itemId=34966382", "created": "2023-08-03T06:11:00.000000"}], "components": [], "created": "2016-01-21T16:46:00.000000", "creator": "557058:750ff3bd-7564-4d8a-b480-b500b85be583", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@6a2ecfb9"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hz41z3:"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "17/Jul/18"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_2_*:*_39720049228_*|*_6_*:*_1_*:*_0_*|*_10005_*:*_1_*:*_197924699064"}], "description": "vmpooler will allow any user in {{ou=users,dc=puppetlabs,dc=com}} to checkout a VM. That includes community users. 
It should be modified to only accept users within a particular group or matching a particular LDAP query.\n\nSince vmpooler isn't available outside of our network, it's not a critical issue.\n\nAlso, vmpooler will not accept users under {{ou=service,ou=users,dc=puppetlabs,dc=com}}, though perhaps that is another ticket.\n\nh3. Solution\n\nUnfortunately, LDAP doesn't provide a way to cram all of this into the BIND request. You have to authenticate as an application user, search for the user using whatever filter you want, then rebind as the user DN that you found.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10188", "fixedVersions": [], "id": "10188", "issueType": "Bug", "key": "POOLER-4", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:750ff3bd-7564-4d8a-b480-b500b85be583", "resolution": "Incomplete", "resolutionDate": "2023-08-03T06:11:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "03/Aug/23 6:11 AM", "summary": "vmpooler authorizes any LDAP user", "timeSpent": "PT0S", "updated": "2023-08-03T06:11:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [{"author": "557058:639cd954-0530-45e8-ba35-7be3297421a0", "body": "{code:title=The beaker script to add two new hard drive above}\ntest_name 'Add extra hard drive for LVM testing'\n\n\n# Get the auth_token from ENV\nauth_tok = ENV['AUTH_TOKEN']\n\n# On the PE agent where LVM running\nconfine_block(:except, :roles => %w{master dashboard database}) do\n  agents.each do |agent|\n    step 'adding an extra disk'\n    on(agent, \"curl -X POST -H X-AUTH-TOKEN:#{auth_tok} --url vcloud/api/v1/vm/#{agent[:vmhostname]}/disk/1\")\n    
sleep(120)\n\n    step 'rescan the SCSI bus on the host to make the newly added hdd recognized'\n    on(agent, \"echo \\\"- - -\\\" >/sys/class/scsi_host/host0/scan\")\n\n\n    #on(agent, \"curl -X POST -H X-AUTH-TOKEN:c00nem2oao477lyc1olnr2k6zgquchxl --url vcloud/api/v1/vm/#{agent[:vmhostname]}/disk/4\")\n    on(agent, \"curl -X POST -H X-AUTH-TOKEN:#{auth_tok} --url vcloud/api/v1/vm/#{agent[:vmhostname]}/disk/1\")\n    sleep(120)\n\n    step 'rescan the SCSI bus on the host to make the newly added hdd recognized'\n    on(agent, \"echo \\\"- - -\\\" >/sys/class/scsi_host/host0/scan\")\n\n    step 'Verify the newly add hdd recognized:'\n    on(agent, \"fdisk -l\") do |result|\n      assert_match(/\\/dev\\/sdc/, result.stdout, \"Unexpected errors is detected\")\n      assert_match(/\\/dev\\/sdd/, result.stdout, \"Unexpected errors is detected\")\n    end\n  end\nend\n{code}", "created": "2016-01-21T12:17:00.000000"}, {"author": "557058:639cd954-0530-45e8-ba35-7be3297421a0", "body": "\n{code:title=Changed from 'blocker' to 'normal' with below work-around:}\nstep 'adding an extra disk: /dev/sdc:'\n    on(agent, \"curl -X POST -H X-AUTH-TOKEN:#{auth_tok} --url vcloud/api/v1/vm/#{agent[:vmhostname]}/disk/1\")\n    sleep(30)\n    step 'rescan the SCSI bus on the host to make the newly added hdd recognized'\n    on(agent, \"echo \\\"- - -\\\" >/sys/class/scsi_host/host0/scan\")\n\n    #keep trying until the hdd is found\n    retry_on(agent, \"fdisk -l | grep \\\"/dev/sdc\\\"\", :max_retries => 360, :retry_interval => 5)\n\n    step 'adding a second extra disk: /dev/sdd:'\n    on(agent, \"curl -X POST -H X-AUTH-TOKEN:#{auth_tok} --url vcloud/api/v1/vm/#{agent[:vmhostname]}/disk/1\")\n    sleep(30)\n    step 'rescan the SCSI bus on the host to make the newly added hdd recognized'\n    on(agent, \"echo \\\"- - -\\\" >/sys/class/scsi_host/host0/scan\")\n\n    #keep trying until the hdd is found\n    retry_on(agent, \"fdisk -l | grep \\\"/dev/sdd\\\"\", :max_retries => 360, 
:retry_interval => 5)\n{code}", "created": "2016-01-25T08:14:00.000000"}, {"author": "557058:639cd954-0530-45e8-ba35-7be3297421a0", "body": "newly added hard drive takes about 30 min to be seen, is there any other way to get it recognized faster other than rescanning the SCSI bus on the host?\n", "created": "2016-01-25T14:07:00.000000"}, {"author": "557058:639cd954-0530-45e8-ba35-7be3297421a0", "body": "I experienced the problem again yesterday and today, adding extra hdd by calling the API does not always work, sometime it adds 1 hdd and not another one, sometime it doesn't add any HDD, even though I raised the number of trying from 360 times to 420 times with retry_interval is 5 seconds.", "created": "2016-03-03T16:18:00.000000"}, {"author": "557058:639cd954-0530-45e8-ba35-7be3297421a0", "body": "[~accountid:557058:7306c811-399c-4964-b8cc-74e0ece239a1]  since Scott is not in the house, who should I contact for this ticket? Thx", "created": "2016-03-03T16:21:00.000000"}, {"author": "557058:7306c811-399c-4964-b8cc-74e0ece239a1", "body": "[~accountid:557058:639cd954-0530-45e8-ba35-7be3297421a0] I will follow up with you soon, once I find someone available to look into this. Thanks.", "created": "2016-03-04T11:37:00.000000"}, {"author": "557058:639cd954-0530-45e8-ba35-7be3297421a0", "body": "[~accountid:557058:7306c811-399c-4964-b8cc-74e0ece239a1] I was talking to [~accountid:557058:3bbf1c83-df0e-4372-887d-cfc38dee9330] few weeks ago about this but not sure if he had time to look at this. Do you have any update on this ticket?", "created": "2016-04-27T12:10:00.000000"}, {"author": "557058:7306c811-399c-4964-b8cc-74e0ece239a1", "body": "Sorry for the delay, Phong. 
I will check with RE to see if they can fit this in to an upcoming sprint.", "created": "2016-04-28T10:31:00.000000"}, {"author": "63d40635a05386069cdb69d6", "body": "[~accountid:557058:74b44c9c-9f7d-4d53-b6b6-e598c1652c9b] or [~accountid:557058:b5baf531-c3a9-49d0-b512-74aaa6e4335d] do you have any cycles to look at this in the next 2 weeks? ", "created": "2016-04-28T10:50:00.000000"}, {"author": "63d40635a05386069cdb69d6", "body": "[~accountid:557058:74b44c9c-9f7d-4d53-b6b6-e598c1652c9b] ticket we talked about today. Code it up to try harder :)", "created": "2016-05-03T09:32:00.000000"}, {"author": "557058:74b44c9c-9f7d-4d53-b6b6-e598c1652c9b", "body": "I've identified a few issues here.  There's a huge discrepancy between the amount of time the pooler takes to attach the disk and the actual time the task takes to complete within vCenter ( [gist|https://gist.github.com/heathseals/f44b29b34714bc651c6cc1530d30f2ae] ) -- running almost 20 minutes according to the pooler, whereas vCenter is running the reconfigure task in just a few seconds.  There has also been a general upward trend of the disk_manager task for the pooler.  While I couldn't reproduce a failure attaching a single disk, the time it takes really isn't acceptable.\n\nI was able to reproduce a failure of attaching a 2nd disk if the 1st had not yet completed.  The pooler registers the request, but then it seems to then be routed to the bit bucket.  I was able to attach a 2nd disk if I waited until the 1st successfully finished.  Not really desired behavior.\n\nNo fixes yet, but I do have a better idea of where to start digging.\n\n", "created": "2016-05-04T09:52:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "VMpooler queues these operations. There is absolutely no guarantee made by the application about when it will be able to complete the requested disk operation. Rather than rely upon vmpooler to perform this work it may make sense to interact with the VM directly. 
If that does not seem like a reasonable alternative then it may be worth deploying an additional platform that has the disk requirement resolved from the start.", "created": "2018-04-18T18:11:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "This has been changed significantly and should be faster and more reliable as a result. Please comment or reopen if issues persist.", "created": "2018-07-16T18:27:00.000000"}], "components": [], "created": "2016-01-21T12:14:00.000000", "creator": "557058:639cd954-0530-45e8-ba35-7be3297421a0", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@74560309"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "QA Contact", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:userpicker", "value": "557058:639cd954-0530-45e8-ba35-7be3297421a0"}, {"fieldName": "QA Status", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Reviewed"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hz419b:"}, {"fieldName": "[CHART] Date of First Response", "fieldType": 
"com.atlassian.jira.ext.charting:firstresponsedate", "value": "04/Mar/16"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_8972771042_*|*_6_*:*_1_*:*_0_*|*_10005_*:*_1_*:*_69410770846"}], "description": "This just happened today 1/21, I was able to do it when https://github.com/puppetlabs/vmpooler/pull/147 merged and put my comments in QENG-3359\n\nHowever, it doesn't work today:\nSteps to reproduce:\n{code:title=1. Attempted to add a 1GB-hdd using the below command:}\ncurl -X POST -H X-AUTH-TOKEN:<token> --url vcloud/api/v1/vm/il69x83f85czmre.delivery.puppetlabs.net/disk/1\n{code}\n\n{code:title=2.The above command was executed successfully:}\n{\n    \"ok\": true,\n    \"il69x83f85czmre\": {\n      \"disk\": \"+1gb\"\n    }\n  }\n{code}\n\nWaited for few minutes,....\n\n{code:title=3.Rescan the SCSI bus on the host to recognize the new hard drive:}\necho \"- - -\" >/sys/class/scsi_host/host0/scan\n{code}\n\nVerify if the new hard drive is added by 'fdisk -l' command. 
\nExpected: new hard drive /dev/sdc will be added\nThere is NO new disk added.\n\nHere is the detail logs of creating 2 hdd using a breaker script:\n{code:title=Attempted to create two new hdd, waiting 10 minutes before rescanning the scsi bus}\nBegin pre-suite/02_add_extra_hdd.rb\n\nAdd extra hard drive for LVM testing\n\n* adding an extra disk\n\nil69x83f85czmre.delivery.puppetlabs.net (il69x83f85czmre.delivery.puppetlabs.net) 09:46:58$ curl -X POST -H X-AUTH-TOKEN:<token> --url vcloud/api/v1/vm/il69x83f85czmre.delivery.puppetlabs.net/disk/1\n  Attempting ssh connection to il69x83f85czmre.delivery.puppetlabs.net, user: root, opts: {:config=>false, :paranoid=>false, :auth_methods=>[\"publickey\"], :port=>22, :forward_agent=>true, :keys=>[\"/Users/phongly//.ssh/id_rsa-acceptance\"], :user_known_hosts_file=>\"/Users/phongly//.ssh/known_hosts\", :keepalive=>true}\n        %     T  o  t  a  l              %     R  e  c  e  i  v  e  d     %     X  f  e  r  d        A  v  e  r  a  g  e     S  p  e  e  d           T  i  m  e              T  i  m  e                 T  i  m  e        C  u  r  r  e  n  t\n                                                                                                     D  l  o  a  d        U  p  l  o  a  d           T  o  t  a  l           S  p  e  n  t              L  e  f  t        S  p  e  e  d\n        0                 0              0                 0              0                 0                    0                    0     -  -  :  -  -  :  -  -     -  -  :  -  -  :  -  -     -  -  :  -  -  :  -  -                 0        0              6  3              0              6  3              0                 0              1  2  2                    0     -  -  :  -  -  :  -  -     -  -  :  -  -  :  -  -     -  -  :  -  -  :  -  -           1  2  {\n    \"ok\": true,\n    \"il69x83f85czmre\": {\n      \"disk\": \"+1gb\"\n    }\n  }  7\n\nil69x83f85czmre.delivery.puppetlabs.net (il69x83f85czmre.delivery.puppetlabs.net) executed 
in 0.74 seconds\n\n* rescan the SCSI bus on the host to make the newly added hdd recognized\n\nil69x83f85czmre.delivery.puppetlabs.net (il69x83f85czmre.delivery.puppetlabs.net) 09:48:59$ echo \"- - -\" >/sys/class/scsi_host/host0/scan\n\nil69x83f85czmre.delivery.puppetlabs.net (il69x83f85czmre.delivery.puppetlabs.net) executed in 0.17 seconds\n\nil69x83f85czmre.delivery.puppetlabs.net (il69x83f85czmre.delivery.puppetlabs.net) 09:48:59$ curl -X POST -H X-AUTH-TOKEN:<token> --url vcloud/api/v1/vm/il69x83f85czmre.delivery.puppetlabs.net/disk/1\n        %     T  o  t  a  l              %     R  e  c  e  i  v  e  d     %     X  f  e  r  d        A  v  e  r  a  g  e     S  p  e  e  d           T  i  m  e              T  i  m  e                 T  i  m  e        C  u  r  r  e  n  t\n                                                                                                     D  l  o  a  d        U  p  l  o  a  d           T  o  t  a  l           S  p  e  n  t              L  e  f  t        S  p  e  e  d\n        0                 0              0                 0              0                 0                    0                    0     -  -  :  -  -  :  -  -     -  -  :  -  -  :  -  -     -  -  :  -  -  :  -  -                 0        0              6  3              0              6  3              0                 0              4  3  1                    0     -  -  :  -  {\n    \"ok\": true,\n    \"il69x83f85czmre\": {\n      \"disk\": \"+1gb\"\n    }\n  }  -:-- --:--:-- --:--:--   450\n\nil69x83f85czmre.delivery.puppetlabs.net (il69x83f85czmre.delivery.puppetlabs.net) executed in 0.16 seconds\n\n* rescan the SCSI bus on the host to make the newly added hdd recognized\n\nil69x83f85czmre.delivery.puppetlabs.net (il69x83f85czmre.delivery.puppetlabs.net) 09:50:59$ echo \"- - -\" >/sys/class/scsi_host/host0/scan\n\nil69x83f85czmre.delivery.puppetlabs.net (il69x83f85czmre.delivery.puppetlabs.net) executed in 0.17 seconds\n\n* Verify the newly add hdd 
recognized:\n\nil69x83f85czmre.delivery.puppetlabs.net (il69x83f85czmre.delivery.puppetlabs.net) 09:50:59$ fdisk -l\n  \n  Disk /dev/sda: 8589 MB, 8589934592 bytes\n  64 heads, 32 sectors/track, 8192 cylinders\n  Units = cylinders of 2048 * 512 = 1048576 bytes\n  Sector size (logical/physical): 512 bytes / 512 bytes\n  I/O size (minimum/optimal): 512 bytes / 512 bytes\n  Disk identifier: 0x0000bbe3\n  \n     Device Boot      Start         End      Blocks   Id  System\n  /dev/sda1   *           2         501      512000   83  Linux\n  Partition 1 does not end on cylinder boundary.\n  /dev/sda2             502        8192     7875584   8e  Linux LVM\n  Partition 2 does not end on cylinder boundary.\n  \n  Disk /dev/sdb: 8589 MB, 8589934592 bytes\n  64 heads, 32 sectors/track, 8192 cylinders\n  Units = cylinders of 2048 * 512 = 1048576 bytes\n  Sector size (logical/physical): 512 bytes / 512 bytes\n  I/O size (minimum/optimal): 512 bytes / 512 bytes\n  Disk identifier: 0x0d501d8c\n  \n     Device Boot      Start         End      Blocks   Id  System\n  /dev/sdb1               1        8192     8388592   8e  Linux LVM\n  \n  Disk /dev/mapper/VolGroup-lv_root: 12.4 GB, 12419334144 bytes\n  255 heads, 63 sectors/track, 1509 cylinders\n  Units = cylinders of 16065 * 512 = 8225280 bytes\n  Sector size (logical/physical): 512 bytes / 512 bytes\n  I/O size (minimum/optimal): 512 bytes / 512 bytes\n  Disk identifier: 0x00000000\n  \n  \n  Disk /dev/mapper/VolGroup-lv_swap: 4227 MB, 4227858432 bytes\n  255 heads, 63 sectors/track, 514 cylinders\n  Units = cylinders of 16065 * 512 = 8225280 bytes\n  Sector size (logical/physical): 512 bytes / 512 bytes\n  I/O size (minimum/optimal): 512 bytes / 512 bytes\n  Disk identifier: 0x00000000\n\nil69x83f85czmre.delivery.puppetlabs.net (il69x83f85czmre.delivery.puppetlabs.net) executed in 0.01 seconds\npre-suite/02_add_extra_hdd.rb failed in 241.27 seconds\n{code}\n\n\n\n\n", "epicLinkSummary": null, "estimate": "PT0S", "externalId": 
"10025", "fixedVersions": [], "id": "10025", "issueType": "Bug", "key": "POOLER-6", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:639cd954-0530-45e8-ba35-7be3297421a0", "resolution": "Fixed", "resolutionDate": "2018-07-16T18:27:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "calling vmpooler API to add hdd returns true, but adding hdd doesn't always work", "timeSpent": "PT0S", "updated": "2018-07-16T18:27:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [], "components": [], "created": "2016-01-12T12:00:00.000000", "creator": "557058:b5baf531-c3a9-49d0-b512-74aaa6e4335d", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@16f2abd4"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Rank", "fieldType": 
"com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hz3vnb:"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_80323720_*|*_5_*:*_1_*:*_0_*|*_10005_*:*_1_*:*_84156639236"}], "description": "At current the vm_lifetime_auth setting is global only.\n\n[https://github.com/puppetlabs/vmpooler/blob/master/lib/vmpooler/api/v1.rb#L65]\n\nWe should be able to add fairly easily with:\n\n\n{code:ruby}\nbackend.hset('vmpooler__vm__' + vm, 'lifetime', (pool['vm_lifetime_auth'] || config['vm_lifetime_auth']).to_i)\n{code}\n\n", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10094", "fixedVersions": [], "id": "10094", "issueType": "New Feature", "key": "POOLER-9", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:b5baf531-c3a9-49d0-b512-74aaa6e4335d", "resolution": "Won't Do", "resolutionDate": "2018-09-13T12:10:00.000000", "status": "Resolved", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "vmpooler 'vm_lifetime_auth' should be configurable by pool", "timeSpent": "PT0S", "updated": "2018-09-13T12:10:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "attachments": [], "comments": [{"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "I have configured vmpooler to use the ops wildcard certificate. It shows as being valid when I connect via ssl to the web interface now. 
A pull request is in to make the change on all vmpooler instances.", "created": "2016-11-18T10:10:00.000000"}], "components": ["VM Pooler"], "created": "2015-12-28T16:36:00.000000", "creator": "557058:40232c77-9d9b-410c-9f53-90adbf41eeb9", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@3d0af44e"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hz3p0v:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "1.0"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "18/Nov/16"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_583946059_*|*_10007_*:*_1_*:*_269240039_*|*_10009_*:*_1_*:*_52569_*|*_6_*:*_1_*:*_0_*|*_10005_*:*_1_*:*_27559217622"}], "description": "Since it wants me to send it my password I'd like to be able to use verified SSL with vmpooler. 
It currently uses a cert that's only valid for *.puppetlabs.com.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10002", "fixedVersions": [], "id": "10002", "issueType": "Improvement", "key": "POOLER-14", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:40232c77-9d9b-410c-9f53-90adbf41eeb9", "resolution": "Fixed", "resolutionDate": "2016-11-21T12:57:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "vmpooler should have a valid SSL certificate", "timeSpent": "PT0S", "updated": "2016-11-21T12:57:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "623c0dfd8d8b9c0068b8af97", "attachments": [], "comments": [{"author": "557058:7306c811-399c-4964-b8cc-74e0ece239a1", "body": "[~accountid:623c0dfd8d8b9c0068b8af97] can you take a look at this one?", "created": "2015-11-20T16:10:00.000000"}, {"author": "623c0dfd8d8b9c0068b8af97", "body": "I am not sure there is much to be investigated at this time. The VM no longer exists in the pooler and the linked build has been removed.\n\nCan this ticket be closed?", "created": "2015-12-30T10:09:00.000000"}, {"author": "557058:c9e9d862-990b-48eb-b4e3-2e4daf1e6920", "body": "Is there any logging we can enhance for situations like this? I.e. to enable debugging or troubleshooting.", "created": "2015-12-30T13:52:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "We don't have logging for the VM's view of things. I think the jenkins job usually has the most useful information. We can correlate the VM to its host and see if it was performing reasonably, as sometimes spikes can cause these issues. 
Otherwise, I probably cannot take any additional action for this specific failure.", "created": "2018-04-18T18:36:00.000000"}], "components": [], "created": "2015-11-20T15:51:00.000000", "creator": "557058:c9e9d862-990b-48eb-b4e3-2e4daf1e6920", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@8d08827"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hz38w7:"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "20/Nov/15"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_1143239_*|*_6_*:*_1_*:*_0_*|*_10005_*:*_1_*:*_76037143823"}], "description": "This has been running for 3+ hours: \n\nhttps://jenkins.puppetlabs.com/view/All%20in%20One%20Agent/view/Stable/view/Puppet%20Agent%20Daily/job/platform_aio-suite_pkg-build_stable/BUILD_TARGET=win-x86,SLAVE_LABEL=beaker/216/\n\nThe vm was hnkol3xfwrgpzpc.delivery.puppetlabs.net but I can't resolve it now. 
Maybe it was reaped?", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10187", "fixedVersions": [], "id": "10187", "issueType": "Bug", "key": "POOLER-3", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:c9e9d862-990b-48eb-b4e3-2e4daf1e6920", "resolution": "Won't Fix", "resolutionDate": "2018-04-18T18:36:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Hung or stalled build on pooler vm", "timeSpent": "PT0S", "updated": "2018-04-18T18:36:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:a2383cac-f799-49a4-8b30-0d737d991e23", "attachments": [], "comments": [], "components": [], "created": "2015-11-19T12:20:00.000000", "creator": "557058:7306c811-399c-4964-b8cc-74e0ece239a1", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@2c389e2d"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": 
"com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hz37tb:"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_9146_*|*_6_*:*_1_*:*_0_*|*_10005_*:*_1_*:*_42670897089"}], "description": "It looks like the pool size for cisco-exr-9k-x86_64 is 1, and we're getting alerts in the ESO Noise room in hipchat whenever this is used. Can this be increased to 5?", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10186", "fixedVersions": [], "id": "10186", "issueType": "Improvement", "key": "POOLER-16", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:7306c811-399c-4964-b8cc-74e0ece239a1", "resolution": "Won't Do", "resolutionDate": "2017-03-27T10:21:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Increase pool size for cisco-exr-9k-x86_64", "timeSpent": "PT0S", "updated": "2017-03-27T10:21:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "623c1349bef8a60068c79b09", "attachments": [], "comments": [], "components": [], "created": "2015-11-19T12:17:00.000000", "creator": "557058:7306c811-399c-4964-b8cc-74e0ece239a1", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@1d40b4dc"}, {"fieldName": "Epic/Theme", "fieldType": 
"com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hz37t3:"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_14168_*|*_5_*:*_1_*:*_0_*|*_10005_*:*_1_*:*_42671099094"}], "description": "There was an alert today in the ESO Noise hipchat channel:\n\nMonitoring Host: mon-icinga1-prod\nHost: hornet.delivery.puppetlabs.net\nService Description: check_vmpooler_hornet.delivery.puppetlabs.net\nOutput: Critical: Found 1 empty pools. (cisco-exr-9k-x86_64)\nState: CRITICAL\nPlaybook: Confluence\n\nBut there isn't an entry for this pool on our vmpooler pool usage dashboard: http://grafana.ops.puppetlabs.net:8080/#/dashboard/elasticsearch/vmpooler%20pool%20usage\n\nWe need to ensure all recently added VMs are visible on this dashboard. 
Also, is there any way we can ensure this is updated any time a new VM is added?", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10125", "fixedVersions": [], "id": "10125", "issueType": "Improvement", "key": "POOLER-15", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:7306c811-399c-4964-b8cc-74e0ece239a1", "resolution": "Fixed", "resolutionDate": "2017-03-27T10:22:00.000000", "status": "Resolved", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Update VMPooler pool usage dashboard with recently added VMs", "timeSpent": "PT0S", "updated": "2017-03-27T10:22:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [{"author": "557058:7306c811-399c-4964-b8cc-74e0ece239a1", "body": "[~accountid:557058:c7e2df03-377d-4bf0-9407-106b8483430d], I'm trying to clean up some old pooler tickets. Is this still an issue for you? If so, I'll find someone who can help. Given the age of the ticket, I assume this was fixed a while back. 
Thanks.", "created": "2017-03-27T10:24:00.000000"}], "components": [], "created": "2015-11-18T05:49:00.000000", "creator": "557058:c7e2df03-377d-4bf0-9407-106b8483430d", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@648c3a8d"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hz362f:"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "27/Mar/17"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_32752209_*|*_6_*:*_1_*:*_0_*|*_10005_*:*_1_*:*_42766955556"}], "description": "I'm trying to generate a vmpooler token using [these instructions|https://confluence.puppetlabs.com/display/QE/Generating+and+using+vmpooler+tokens] but all I get back is a JSON with OK = false. I'm using the same credentials as when logging into confluence or JIRA. 
Perhaps my account 'thomas.hallgren' hasn't been added to the vmpooler?\n", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10001", "fixedVersions": [], "id": "10001", "issueType": "Bug", "key": "POOLER-5", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:c7e2df03-377d-4bf0-9407-106b8483430d", "resolution": "Fixed", "resolutionDate": "2017-03-27T15:37:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Unable to generate vmpooler tokens", "timeSpent": "PT0S", "updated": "2017-03-27T15:37:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:a2383cac-f799-49a4-8b30-0d737d991e23", "attachments": [], "comments": [{"author": "557058:a2383cac-f799-49a4-8b30-0d737d991e23", "body": "See also: RE-5809.\n\nA few questions and statements:\n\n- Are these different \"sizes\" of hosts needed for automated testing, manual testing, or both?\n- We could use something like https://github.com/puppetlabs/vmwr, which I believe Ops already has deployed somewhere, to provision different-sized VMs on-demand", "created": "2015-10-29T13:25:00.000000"}, {"author": "557058:151c148a-2bb4-4fdc-9320-df94e73b0363", "body": "[~accountid:557058:a2383cac-f799-49a4-8b30-0d737d991e23]\n\n- Are these different \"sizes\" of hosts needed for automated testing, manual testing, or both?\n-- {color:red}Used for both, but mostly for automation in CI.{color}\n- We could use something like https://github.com/puppetlabs/vmwr, which I believe Ops already has deployed somewhere, to provision different-sized VMs on-demand", "created": "2015-10-29T15:44:00.000000"}, {"author": "557058:a2383cac-f799-49a4-8b30-0d737d991e23", "body": "PR 
to add disks via the vmpooler API: https://github.com/puppetlabs/vmpooler/pull/147", "created": "2016-01-13T11:14:00.000000"}, {"author": "557058:a2383cac-f799-49a4-8b30-0d737d991e23", "body": "Disk-adding has been merged and deployed to production.", "created": "2016-01-14T16:50:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "Is this still required beyond the disk capability that was already added?", "created": "2018-04-18T18:32:00.000000"}], "components": [], "created": "2015-10-29T11:35:00.000000", "creator": "557058:151c148a-2bb4-4fdc-9320-df94e73b0363", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@6c10d511"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hz2upb:"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "29/Oct/15"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_9371_*|*_6_*:*_1_*:*_0_*|*_10004_*:*_1_*:*_7689316682_*|*_10005_*:*_1_*:*_77957786489"}], 
"description": "The necessity of having vmpooler templates with more resources (i.e. disk space and RAM) is becoming more prevelant. It would be awesome if the vmpooler supported grabbing instances of machines with dynamic resource allocation. I was thinking of a similar feature set that AWS provides for instance sizes like micro, large and xlarge.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10153", "fixedVersions": [], "id": "10153", "issueType": "New Feature", "key": "POOLER-10", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:151c148a-2bb4-4fdc-9320-df94e73b0363", "resolution": "Won't Do", "resolutionDate": "2018-07-16T18:27:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Allow Dynamic Resource Allocation for vmpooler Instances", "timeSpent": "PT0S", "updated": "2018-07-16T18:27:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:e2510a0a-1767-444f-be52-bae9bb15d3f6", "attachments": [], "comments": [{"author": "623c0cebbef8a60068c7977d", "body": "Hi [~accountid:557058:e2510a0a-1767-444f-be52-bae9bb15d3f6].  I assigned this ticket to you because I'd like to get some information about how often the jobs testing arista will be kicked off, so that we can size the pool appropriately.\n\nWill arista only be tested through one particular job, or will it be added as a part of matrices to other jobs?  How often will these jobs be run?", "created": "2015-10-23T09:46:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "The pool sizes are adjustable in puppetlabs-modules. 
If we still need to adjust this we can make a change there.", "created": "2018-04-16T13:19:00.000000"}], "components": [], "created": "2015-10-23T09:45:00.000000", "creator": "623c0cebbef8a60068c7977d", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@38ae686a"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hyoim7:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "1.0"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "16/Apr/18"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_4094_*|*_6_*:*_1_*:*_0_*|*_10004_*:*_1_*:*_78291256148"}], "description": "I noticed this morning in my work for BKR-588 that I drained the arista pool in the vmpooler, because there were 5 hosts in it.\n\nNow that pipelines are being made for arista itself, I want to make sure that we know how that pipeline is going to be run, and make the sure the vmpooler is ready 
before that starts happening.  This ticket is to track getting that information, & making the required changes to vmpooler pool size.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10039", "fixedVersions": [], "id": "10039", "issueType": "Improvement", "key": "POOLER-19", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Major", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "623c0cebbef8a60068c7977d", "resolution": "Fixed", "resolutionDate": "2018-04-16T13:19:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Adjust vmpooler arista pool size based on expected usage", "timeSpent": "PT0S", "updated": "2018-04-16T13:19:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:a2383cac-f799-49a4-8b30-0d737d991e23", "attachments": [], "comments": [{"author": "557058:7306c811-399c-4964-b8cc-74e0ece239a1", "body": "Duplicate of POOLER-25.", "created": "2016-12-05T11:30:00.000000"}], "components": [], "created": "2015-10-20T12:16:00.000000", "creator": "63d40635a05386069cdb69d6", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@73d76bf5"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", 
"value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hyoipr:"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "05/Dec/16"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_864743902_*|*_6_*:*_1_*:*_0_*|*_10005_*:*_1_*:*_34732874464"}], "description": "On hornet the vmpooler.log is 569M. We should probably enable logrotate there so those get rotated out and compressed. ", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10152", "fixedVersions": [], "id": "10152", "issueType": "New Feature", "key": "POOLER-8", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "63d40635a05386069cdb69d6", "resolution": "Duplicate", "resolutionDate": "2016-12-05T11:30:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Enable logrotate on vmpooler.log", "timeSpent": "PT0S", "updated": "2016-12-05T11:30:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "623c1349bef8a60068c79b09", "attachments": [], "comments": [{"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "The application has support for alternate backend providers now. If someone is compelled then it's possible to create a backend provider for openstack. 
However, I don't think that we have any plans to run infrastructure on openstack for testing in the near future so I cannot see justifying the work.", "created": "2018-04-18T18:32:00.000000"}], "components": [], "created": "2015-10-08T18:15:00.000000", "creator": "557058:63eb8bfe-2856-4cbd-ab65-a0e2d681f15c", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@59cb6c38"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hz2ksv:"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "19/Apr/18"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_48040110_*|*_6_*:*_1_*:*_0_*|*_10006_*:*_1_*:*_79700177883"}], "description": "The SysOps SLICE project is slated to go production before the end of the year.  SLICE will provide everyone in the company access to their own compute resources and segregated tenant networking to go with it and is based off of OpenStack; this brings up two requests.\nh2. 
\n* The vmpooler needs a generic OpenStack backend\n** SLICE is basically our operational proving ground for OpenStack in all production aspects of SysOps managed infrastructure.\n** There is some time before we halt VMware expansion for ESO usage...\n** There will be hardening and improvements to SLICE and a deployment to replace SysOps' usage of VMware\n** After SysOps is fully into production, planning starts for ESO\n** We tentatively wish to not have to renew VMware licensing support within two years\n* It would be *fantastic* if the vmpooler could upon request from a user manage VMs inside an individual's tenant\n** The tenant already has built in quotas\n** People could have instant access to compute resources without taking resources from CI\n** People can do work without potentially affecting other's work", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10000", "fixedVersions": [], "id": "10000", "issueType": "Task", "key": "POOLER-13", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:63eb8bfe-2856-4cbd-ab65-a0e2d681f15c", "resolution": "Won't Do", "resolutionDate": "2018-04-18T18:32:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "OpenStack backend for vmpooler", "timeSpent": "PT0S", "updated": "2018-04-18T18:32:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:a2383cac-f799-49a4-8b30-0d737d991e23", "attachments": [], "comments": [{"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "It does this.", "created": "2016-11-17T10:24:00.000000"}], "components": [], "created": "2015-09-16T14:21:00.000000", "creator": 
"557058:a2383cac-f799-49a4-8b30-0d737d991e23", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@3dbbfa4a"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Free ?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons", "value": "Yes"}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hz28un:"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "17/Nov/16"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_5000540513_*|*_6_*:*_1_*:*_0_*|*_10005_*:*_1_*:*_31967991230"}], "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10035", "fixedVersions": [], "id": "10035", "issueType": "Bug", "key": "POOLER-1", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:a2383cac-f799-49a4-8b30-0d737d991e23", "resolution": "Fixed", "resolutionDate": "2016-11-17T10:24:00.000000", 
"status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "check_pending_vm should validate that :22 is available over TCP", "timeSpent": "PT0S", "updated": "2016-11-17T10:24:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:5d2971d3-3eb2-4f05-ab63-8a307eabd9c4", "attachments": [], "comments": [], "components": ["VM Pooler"], "created": "2015-09-01T14:03:00.000000", "creator": "63d40635a05386069cdb69d6", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@482e8c93"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hz1yiv:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "1.0"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_12379400_*|*_10007_*:*_1_*:*_515029035_*|*_5_*:*_1_*:*_0_*|*_10006_*:*_1_*:*_89751965053"}], "description": "When you GET info on a vm you get \n\n{code}\n{\n  \"ok\": true,\n  \"ab9v5mhuz0aprd7\": {\n    \"template\": \"centos-7-x86_64\",\n    \"lifetime\": 12,\n    \"running\": 1.21,\n    \"state\": \"running\",\n    \"tags\": {\n      
\"user\": \"stahnma\",\n      \"client\": \"vmpool-cli-0.2.1\"\n    },\n    \"domain\": \"delivery.puppetlabs.net\"\n  }\n}\n{code}\n\nHowever, all I see is 12. I have no idea when it's going to be reaped. If I look it now and 3 hours from now, I see the same JSON output. I'd like to see an expiration time/date. \n\n", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10124", "fixedVersions": [], "id": "10124", "issueType": "Improvement", "key": "POOLER-81", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Minor", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "63d40635a05386069cdb69d6", "resolution": "Fixed", "resolutionDate": "2018-07-12T11:39:00.000000", "status": "Resolved", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Lifetime on vm is non deterministic", "timeSpent": "PT0S", "updated": "2018-07-12T11:39:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [{"author": "623c0c4d7910a200718b910a", "body": "[~accountid:557058:a2383cac-f799-49a4-8b30-0d737d991e23] - do you think this is ready for engineering as-is, or do you need additional info?", "created": "2015-09-01T17:20:00.000000"}, {"author": "63d40635a05386069cdb69d6", "body": "Happy to walk through ins and outs of API ideas, or anything that's needed. ", "created": "2015-09-01T17:23:00.000000"}, {"author": "557058:a2383cac-f799-49a4-8b30-0d737d991e23", "body": "I think this can be \"ready for engineering\"", "created": "2015-09-01T21:54:00.000000"}, {"author": "557058:b3e6b0df-198d-45f8-9ba5-d41f90e80ced", "body": "+1 for AIX and SPARC. 
These platforms continue to be difficult to manage since they are managed differently from the rest of our testing infrastructure.\n\n[~accountid:70121:b72c16a2-594d-45f8-90bc-c6bc6c9510a8] suggested that if we were able to pool the RE and QE LPARs then the vmpooler would have 2 instances of each platform available on demand. The current token and ttl functionality would allow RE to capture these instances and hold them as long as they need them for now.", "created": "2015-09-09T11:39:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "I believe we deployed at nspooler to resolve these issues. If I have missed something, or this is unrelated, please re-open the ticket.", "created": "2018-04-18T18:30:00.000000"}], "components": [], "created": "2015-08-28T15:19:00.000000", "creator": "63d40635a05386069cdb69d6", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@48e815b7"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hz1x1b:"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "01/Sep/15"}, {"fieldName": "[CHART] Time in Status", "fieldType": 
"com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_412223273_*|*_6_*:*_1_*:*_0_*|*_10006_*:*_1_*:*_82888876072"}], "description": "We'd like to use the vmpooler to front devices that are not just in vmware. This would allow us to use the same APIs to get machines, regardless of how they're setup.\n\nFor our immediate use-case, AIX is what we'd like to have, but eventually fronting network devices and Solaris SPARC could work.\n\nWhat we need is the concept of our system registering with the pooler as available in the pool, then having them reserved/used and then refilling. It might be best to have the refill issue a callback to run some type of destroy/cleanup task on a system rather than 100% reprovision it. \n\nWe should look into this and see what the difficulties would be. \n\n", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10185", "fixedVersions": [], "id": "10185", "issueType": "Improvement", "key": "POOLER-17", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "63d40635a05386069cdb69d6", "resolution": "Won't Do", "resolutionDate": "2018-04-18T18:30:00.000000", "status": "Closed", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Scope allowing vmpooler to front other devices", "timeSpent": "PT0S", "updated": "2018-04-18T18:30:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [{"author": "557058:2249d8e6-5f8c-489f-942b-1c2739792d34", "body": "[~accountid:557058:a2383cac-f799-49a4-8b30-0d737d991e23] I think this is in your court to prioritize.", "created": "2015-08-28T10:31:00.000000"}, {"author": "557058:f27ef514-99cf-47f1-b8b7-9a1e9d466c72", "body": "I think this 
is possible via aliases. Is this still something that you would find useful [~accountid:63d40635a05386069cdb69d6]?", "created": "2018-04-18T18:34:00.000000"}, {"author": "557058:cd25c829-41d1-41be-b9ec-df76c04b00c2", "body": "[~accountid:63d40635a05386069cdb69d6] Is this still worthwhile or can I close the ticket?", "created": "2018-08-29T16:06:00.000000"}], "components": [], "created": "2015-08-19T11:33:00.000000", "creator": "63d40635a05386069cdb69d6", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@10dbb8bb"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hz1r87:"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "28/Aug/15"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_773904234_*|*_5_*:*_1_*:*_0_*|*_10004_*:*_1_*:*_113378174347"}], "description": "When testing against EL systems, today you must be explicit about wanting to test on CentOS or RHEL and we rarely test with Oracle Enterprise Linux or Scientific linux at all. I'd love to see a dynamic or virtual pool that randomly handed out an EL derivative when asked. 
\n\nexample:\n\nI want an el-6-i386 system. vmpooler could hand me any of centos, rhel, scientific, oracle. For my use cases, it doesn't even have to tell me what it gave me, just a host name should work.\n\nThis will help us ensure that our items work on all RHEL derived systems, and that our images/templates are working as well.\n\n", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10184", "fixedVersions": [], "id": "10184", "issueType": "New Feature", "key": "POOLER-7", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "63d40635a05386069cdb69d6", "resolution": "Won't Do", "resolutionDate": "2019-04-01T16:28:00.000000", "status": "Resolved", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Pooler should randomly hand out an EL system when asked", "timeSpent": "PT0S", "updated": "2019-04-01T16:28:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": "557058:7306c811-399c-4964-b8cc-74e0ece239a1", "attachments": [], "comments": [{"author": "557058:2249d8e6-5f8c-489f-942b-1c2739792d34", "body": "The PDB team is happy to manage their CI until QE can provide a reliable internal system, with automated management (JJB) that includes PR testing. I consider this blocked on the QENG-2213 & QENG-2215.", "created": "2015-04-15T16:33:00.000000"}, {"author": "557058:3a3de6c6-95b1-47b3-a52a-6a6865ff9241", "body": "Conservatively estimating this ticket at 8 points since there will be significant effort required from the PuppetDB team. Open also to splitting it into a 5-point QENG ticket and 3-point PDB ticket. 
[~accountid:557058:18109e62-1b97-4fa3-8217-9c25de3a4710] do you have any thoughts on this?", "created": "2015-07-17T18:13:00.000000"}, {"author": "557058:18109e62-1b97-4fa3-8217-9c25de3a4710", "body": "I'd call it 5 on our side", "created": "2015-07-17T18:15:00.000000"}, {"author": "557058:5d2971d3-3eb2-4f05-ab63-8a307eabd9c4", "body": "Switched to VMpooler and working per PR https://github.com/puppetlabs/puppetdb/pull/2252", "created": "2017-04-18T16:15:00.000000"}], "components": [], "created": "2015-04-10T16:42:00.000000", "creator": "623c0c4d7910a200718b910a", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@6e49a6"}, {"fieldName": "Epic/Theme", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hyoipj:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "5.0"}, {"fieldName": "[CHART] Date of First Response", "fieldType": "com.atlassian.jira.ext.charting:firstresponsedate", "value": "15/Apr/15"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": 
"1_*:*_1_*:*_8472668591_*|*_10007_*:*_1_*:*_49239892457_*|*_3_*:*_1_*:*_95144415_*|*_5_*:*_1_*:*_0_*|*_10004_*:*_1_*:*_1921679131_*|*_10006_*:*_1_*:*_4118627952"}], "description": "PuppetDB automated testing was moved to EC2 two years ago because our internal infrastructure couldn't handle it. Now that we have the infrastructure to handle testing, PuppetDB automated testing should be moved to internal infrastructure.\n\nOne-off testing that requires huge instances (such as c3.8xlarge) could continue to use EC2 for short amount of times.\n\nThis story should capture the work needed to make this happen.", "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10093", "fixedVersions": [], "id": "10093", "issueType": "Task", "key": "POOLER-11", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Trivial", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "623c0c4d7910a200718b910a", "resolution": "Fixed", "resolutionDate": "2017-04-18T16:16:00.000000", "status": "Resolved", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "PuppetDB automated testing should use vmpooler", "timeSpent": "PT0S", "updated": "2017-04-18T16:16:00.000000", "votes": "0", "watchers": []}, {"affectedVersions": [], "assignee": null, "attachments": [], "comments": [], "components": [], "created": "2015-02-25T10:55:00.000000", "creator": "557058:a2383cac-f799-49a4-8b30-0d737d991e23", "customFieldValues": [{"fieldName": "Sub-team", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Approvals", "fieldType": "com.atlassian.servicedesk.approvals-plugin:sd-approvals", "value": "com.atlassian.servicedesk.plugins.approvals.internal.customfield.ApprovalsCFValue@7d77081b"}, {"fieldName": "Epic/Theme", "fieldType": 
"com.atlassian.jira.plugin.system.customfieldtypes:labels", "value": []}, {"fieldName": "Capitalized?", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:select", "value": "Yes"}, {"fieldName": "People Involved", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker", "value": []}, {"fieldName": "Flagged", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes", "value": []}, {"fieldName": "Rank", "fieldType": "com.pyxis.greenhopper.jira:gh-lexo-rank", "value": "0|hyykxz:"}, {"fieldName": "Story Points", "fieldType": "com.atlassian.jira.plugin.system.customfieldtypes:float", "value": "8.0"}, {"fieldName": "[CHART] Time in Status", "fieldType": "com.atlassian.jira.ext.charting:timeinstatus", "value": "1_*:*_1_*:*_14096927933_*|*_5_*:*_1_*:*_0_*|*_10006_*:*_1_*:*_97357478199"}], "epicLinkSummary": null, "estimate": "PT0S", "externalId": "10031", "fixedVersions": [], "id": "10031", "issueType": "Task", "key": "POOLER-12", "labels": [], "originalEstimate": "PT0S", "parent": null, "parentSummary": null, "priority": "Normal", "projectDescription": null, "projectKey": "POOLER", "projectLead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "projectName": "VM Pooler (Archived)", "projectType": "software", "projectUrl": null, "reporter": "557058:a2383cac-f799-49a4-8b30-0d737d991e23", "resolution": "Fixed", "resolutionDate": "2018-09-07T11:28:00.000000", "status": "Resolved", "statusCategory": "Done", "statuscategorychangedate": "11/Mar/23 9:10 AM", "summary": "Develop vmpooler acceptance tests / testing", "timeSpent": "PT0S", "updated": "2018-09-07T11:28:00.000000", "votes": "0", "watchers": []}], "key": "POOLER", "lead": "557058:ad3ba12f-a09f-4f5e-9fff-8660ad829629", "name": "VM Pooler (Archived)", "template": "com.pyxis.greenhopper.jira:gh-simplified-scrum-classic", "type": "software", "url": "", "versions": [{"name": "vmpooler-3.2.0", "releaseDate": "2023-06-26", "released": false}]}]}