| 98 | | add_proj_shortfall(p, dt) |
| 99 | | }}} |
| 100 | | ------------ |
| 101 | | '''prepare()''': called before exists_fetchable_project(). |
| 102 | | sees if there's a project to request work from for this resource, and caches it |
| 103 | | |
| 104 | | '''bool exists_fetchable_project()''': |
| 105 | | there's a project we can ask for work for this resource |
| 106 | | |
| 107 | | '''select_project(priority, char buf)''': |
| 108 | | given the priority of getting work for this resource, |
| 109 | | chooses and returns a PROJECT to request work from, |
| 110 | | and a string to put in the request message |
| 111 | | Choose the project for which LTD + expected payoff is largest |
| 112 | | |
| 113 | | Values for priority: |
| 114 | | * DONT_NEED: no shortfalls |
| 115 | | * NEED: a shortfall, but no idle devices right now |
| 116 | | * NEED_NOW: idle devices right now |
| 117 | | |
| 118 | | '''runnable_resource_share()''': total resource share of projects with |
| 119 | | runnable jobs for this resource. |
| 120 | | |
| 121 | | '''get_priority()''' |
| 122 | | |
| 123 | | '''bool count_towards_share(PROJECT p)''': |
| 124 | | whether to count p's resource share in the total for this rsc |
| 125 | | == whether we've got a job of this type in the last 30 days |
| 126 | | |
| 127 | | '''add_shortfall(PROJECT, dt)''': |
| 128 | | add x to this project's shortfall, |
| 129 | | where x = dt*(share - instances used) |
| 130 | | |
| 131 | | '''double total_share()''': |
| 132 | | total resource share of projects we're counting |
| | 98 | p->PRSC_PROJECT_DATA.accumulate_shortfall(dt) |
| | 99 | }}} |
| | 100 | |
| | 101 | '''select_project()''': |
| | 102 | select the best project to request this type of work from. |
| | 103 | It's the project not backed off for this PRSC, |
| | 104 | and for which LTD + this->shortfall is largest |
| 187 | | send_req(p) |
| 188 | | switch cpu_work_fetch.priority |
| 189 | | case DONT_NEED |
| 190 | | set no_cpu in req message |
| 191 | | case NEED, NEED_NOW: |
| 192 | | work_req_sec = p.cpu_shortfall |
| 193 | | ncpus_idle = p.max_idle_cpus |
| 194 | | switch cuda_work_fetch.priority |
| 195 | | case DONT_NEED |
| 196 | | set no_cuda in the req message |
| 197 | | case NEED, NEED_NOW: |
| | 164 | if cuda_work_fetch.nidle |
| | 165 | cpu_work_fetch.shortfall = 0 |
| | 166 | p = cuda_work_fetch.select_project() |
| | 167 | if p |
| | 168 | send_req(p) |
| | 169 | return |
| | 170 | if cpu_work_fetch.nidle |
| | 171 | cuda_work_fetch.shortfall = 0 |
| | 172 | p = cpu_work_fetch.select_project() |
| | 173 | if p |
| | 174 | send_req(p) |
| | 175 | return |
| | 176 | if cuda_work_fetch.shortfall |
| | 177 | p = cuda_work_fetch.select_project() |
| | 178 | if p |
| | 179 | send_req(p) |
| | 180 | return |
| | 181 | if cpu_work_fetch.shortfall |
| | 182 | p = cpu_work_fetch.select_project() |
| | 183 | if p |
| | 184 | send_req(p) |
| | 185 | return |
| | 186 | |
| | 187 | void send_req(p) |
| | 188 | req.cpu_req_seconds = cpu_work_fetch.shortfall; |
| | 189 | req.cpu_req_ninstances = cpu_work_fetch.nidle |
| | 190 | req.cuda_req_seconds = cuda_work_fetch.shortfall; |