Let's say you want to host the domains first.com and second.com. Create folders for their files:
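As a minimal sketch (the /var/www/&lt;domain&gt;/html layout is an assumption here, a common convention rather than anything the tutorial mandates):

import os

# Create one web-root folder per hosted domain. The /var/www/<domain>/html
# layout is assumed, not required; adjust to your server's convention.
for domain in ("first.com", "second.com"):
    os.makedirs(os.path.join("/var/www", domain, "html"), exist_ok=True)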
#!/usr/bin/env python3
# train_grpo.py
#
# See https://github.com/willccbb/verifiers for ongoing developments
#
"""
citation:

@misc{brown2025grpodemo,
    title={Granular Format Rewards for Eliciting Mathematical Reasoning Capabilities in Small Language Models},
    author={Brown, William},
    year={2025}
}
"""
import argparse
import base64
import hashlib
import json
import os
import secrets
import sys
import time

import tqdm
import numpy as np
import torch
import torch.distributed as dist
import transformers


def extract_xml_answer(text: str) -> str:
    # Pull out the text between the last <final_answer> ... </final_answer>
    # pair; if the tags are missing, the whole stripped input comes back.
    answer = text.split("<final_answer>")[-1]
    answer = answer.split("</final_answer>")[0]
    return answer.strip()
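# Quick usage example (illustrative only, not part of the original script):
# extract_xml_answer pulls the final answer out of a tagged model completion.
_demo = "Some chain of thought. <final_answer> 42 </final_answer>"
assert extract_xml_answer(_demo) == "42"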
/*
The Twitter API is stupid. It is stupid and bad and expensive. Hence, this.

Just paste this into the JS console on the Bookmarks tab and the script will
automatically scroll to the bottom of your bookmarks, keeping track of them as
it goes. When finished, it downloads a JSON file containing the raw text
content of every bookmark.

For now it stores just the text inside each tweet, but if you're reading this,
why don't you go ahead and also store the other information (author, tweetLink,
pictures, everything)? Come on. Do it. Please?
*/
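Once the export finishes, reading it back is straightforward. A minimal sketch, assuming the download is a flat JSON array of tweet-text strings saved as bookmarks.json (both the filename and the shape are assumptions; the comment above only promises "raw text content"):

import json

# Assumed: bookmarks.json is a flat JSON array of tweet-text strings.
with open("bookmarks.json", encoding="utf-8") as f:
    bookmarks = json.load(f)

for i, text in enumerate(bookmarks, start=1):
    print(f"{i:4d}. {text[:80]}")  # first 80 characters of each bookmark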
['absl-py==1.4.0',
 'affine==2.4.0',
 'aiohttp==3.8.1',
 'aiosignal==1.3.1',
 'analytics-python==1.4.post1',
 'anyio==3.7.1',
 'anytree==2.8.0',
 'argcomplete==1.10.3',
 'argon2-cffi-bindings==21.2.0',
 'argon2-cffi==21.3.0',
import Foundation
import SceneKit

public enum BinarySTLParser {
    public enum STLError: Error {
        case fileTooSmall(size: Int)                        // smaller than the 84-byte binary STL preamble
        case unexpectedFileSize(expected: Int, actual: Int) // byte length disagrees with the declared triangle count
        case triangleCountMismatch(diff: Int)               // parsed triangle count differs from the declared one
    }
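Those error cases map directly onto the binary STL layout: an 80-byte header, a little-endian uint32 triangle count, then 50 bytes per triangle. A minimal Python sketch of the same size checks (the function name is hypothetical, not taken from the Swift source):

import struct

def check_binary_stl(data: bytes) -> int:
    # Binary STL layout: 80-byte header, then a little-endian uint32 triangle
    # count, then 50 bytes per triangle (12 float32s plus a 2-byte attribute).
    if len(data) < 84:
        raise ValueError(f"fileTooSmall: {len(data)} bytes")  # cf. STLError.fileTooSmall
    (count,) = struct.unpack_from("<I", data, 80)
    expected = 84 + 50 * count
    if len(data) != expected:
        # cf. STLError.unexpectedFileSize(expected:actual:)
        raise ValueError(f"unexpectedFileSize: expected {expected}, got {len(data)}")
    # A parser that reads more or fewer triangles than `count` would hit
    # the remaining case, STLError.triangleCountMismatch.
    return count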